linux/drivers/infiniband/core/mad_rmpp.c
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

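/*
 * This file implements the MAD layer's RMPP support: reassembly of
 * multi-packet transfers on the receive side, and segmentation,
 * windowing, and retransmission on the send side.  (RMPP is the
 * multi-packet transaction protocol defined by the InfiniBand
 * Architecture Specification for MADs larger than a single packet.)
 */
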
enum rmpp_state {
        RMPP_STATE_ACTIVE,
        RMPP_STATE_TIMEOUT,
        RMPP_STATE_COMPLETE,
        RMPP_STATE_CANCELING
};

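/*
 * One in-progress (or recently completed) RMPP receive transaction.
 * Incoming segments are matched to an entry by the sender's TID,
 * source QP, SLID, management class, class version, and method
 * (see find_rmpp_recv()).
 */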
struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
        struct delayed_work timeout_work;
        struct delayed_work cleanup_work;
        struct completion comp;
        enum rmpp_state state;
        spinlock_t lock;
        atomic_t refcount;

        struct ib_ah *ah;
        struct ib_mad_recv_wc *rmpp_wc;
        struct ib_mad_recv_buf *cur_seg_buf;
        int last_ack;
        int seg_num;
        int newwin;
        int repwin;

        __be64 tid;
        u32 src_qp;
        u32 slid;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
        u8 base_version;
};

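/*
 * Teardown pattern: destroy_rmpp_recv() drops its own reference and
 * then waits on ->comp, so it cannot return while another thread still
 * holds a reference taken via acquire_rmpp_recv().
 */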
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        if (atomic_dec_and_test(&rmpp_recv->refcount))
                complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        deref_rmpp_recv(rmpp_recv);
        wait_for_completion(&rmpp_recv->comp);
        rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
        kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
        struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                        ib_free_recv_mad(rmpp_recv->rmpp_wc);
                rmpp_recv->state = RMPP_STATE_CANCELING;
        }
        spin_unlock_irqrestore(&agent->lock, flags);

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                cancel_delayed_work(&rmpp_recv->timeout_work);
                cancel_delayed_work(&rmpp_recv->cleanup_work);
        }

        flush_workqueue(agent->qp_info->port_priv->wq);

        list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                 &agent->rmpp_list, list) {
                list_del(&rmpp_recv->list);
                destroy_rmpp_recv(rmpp_recv);
        }
}

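/*
 * Build an RMPP ACK in place from the received MAD's headers.  The ACK
 * echoes the highest in-order segment received (seg_num) and
 * advertises the current edge of the receive window (newwin).
 */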
static void format_ack(struct ib_mad_send_buf *msg,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *ack = msg->mad;
        unsigned long flags;

        memcpy(ack, &data->mad_hdr, msg->hdr_len);

        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        rmpp_recv->last_ack = rmpp_recv->seg_num;
        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
                     struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        int ret, hdr_len;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1, hdr_len,
                                 0, GFP_KERNEL,
                                 IB_MGMT_BASE_VERSION);
        if (IS_ERR(msg))
                return;

        format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
        msg->ah = rmpp_recv->ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
                                                  struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_ah *ah;
        int hdr_len;

        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
                                  recv_wc->recv_buf.grh, agent->port_num);
        if (IS_ERR(ah))
                return (void *) ah;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1,
                                 hdr_len, 0, GFP_KERNEL,
                                 IB_MGMT_BASE_VERSION);
        if (IS_ERR(msg))
                rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
        else {
                msg->ah = ah;
                msg->context[0] = ah;
        }

        return msg;
}

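/*
 * Acknowledge the ACK of a dual-sided (DS) RMPP transfer.  The reply
 * deliberately carries seg_num 0 and a window of 1: it is part of the
 * DS direction-switch handshake, not a data acknowledgment.
 */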
static void ack_ds_ack(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
                ib_free_send_mad(msg);
        }
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
                rdma_destroy_ah(mad_send_wc->send_buf->ah,
                                RDMA_DESTROY_AH_SLEEPABLE);
        ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
                      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = 0;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
                ib_free_send_mad(msg);
        }
}

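/*
 * A reassembly that sees no new segment within the timeout is torn
 * down and the sender is notified with an ABORT of status T2L
 * ("total time too long").
 */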
static void recv_timeout_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, timeout_work.work);
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        rmpp_recv->state = RMPP_STATE_TIMEOUT;
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

        rmpp_wc = rmpp_recv->rmpp_wc;
        nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
        destroy_rmpp_recv(rmpp_recv);
        ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, cleanup_work.work);
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state == RMPP_STATE_CANCELING) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
        destroy_rmpp_recv(rmpp_recv);
}

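/*
 * Start tracking a new receive transaction.  The first segment's work
 * completion supplies the address handle used for all ACKs, and its
 * MAD header supplies the identity used to match later segments.
 */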
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr;

        rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
        if (!rmpp_recv)
                return NULL;

        rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
                                             mad_recv_wc->wc,
                                             mad_recv_wc->recv_buf.grh,
                                             agent->agent.port_num);
        if (IS_ERR(rmpp_recv->ah))
                goto error;

        rmpp_recv->agent = agent;
        init_completion(&rmpp_recv->comp);
        INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
        INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);

        rmpp_recv->rmpp_wc = mad_recv_wc;
        rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;
        rmpp_recv->repwin = 1;

        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
        rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
        rmpp_recv->slid = mad_recv_wc->wc->slid;
        rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
        rmpp_recv->class_version = mad_hdr->class_version;
        rmpp_recv->method  = mad_hdr->method;
        rmpp_recv->base_version  = mad_hdr->base_version;
        return rmpp_recv;

error:  kfree(rmpp_recv);
        return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid == mad_hdr->tid &&
                    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
                    rmpp_recv->slid == mad_recv_wc->wc->slid &&
                    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
                    rmpp_recv->class_version == mad_hdr->class_version &&
                    rmpp_recv->method == mad_hdr->method)
                        return rmpp_recv;
        }
        return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv)
                atomic_inc(&rmpp_recv->refcount);
        spin_unlock_irqrestore(&agent->lock, flags);
        return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct mad_rmpp_recv *rmpp_recv)
{
        struct mad_rmpp_recv *cur_rmpp_recv;

        cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
        if (!cur_rmpp_recv)
                list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

        return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
                                                    struct ib_mad_recv_buf *seg)
{
        if (seg->list.next == rmpp_list)
                return NULL;

        return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

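/*
 * Advertise a receive window of 1/8 of the receive queue depth, but
 * never less than one segment; e.g. a queue of 64 posted receives
 * yields a window of 8.
 */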
static inline int window_size(struct ib_mad_agent_private *agent)
{
        return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
{
        struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;

        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
                cur_seg_num = get_seg_num(seg_buf);
                if (seg_num > cur_seg_num)
                        return seg_buf;
                if (seg_num == cur_seg_num)
                        break;
        }
        return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
                           struct ib_mad_recv_buf *new_buf)
{
        struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

        while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
                rmpp_recv->cur_seg_buf = new_buf;
                rmpp_recv->seg_num++;
                new_buf = get_next_seg(rmpp_list, new_buf);
        }
}

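/*
 * Total reassembled length: header plus one full data payload per
 * segment, minus the pad implied by the last segment's paylen_newwin
 * (which on the last segment reports the residual payload rather than
 * a window).  An out-of-range pad is treated as zero, not trusted.
 */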
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *rmpp_mad;
        int hdr_size, data_size, pad;
        bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
                                    rmpp_recv->agent->qp_info->port_priv->port_num);

        rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

        hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
        if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
                data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
                pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
                if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
                        pad = 0;
        } else {
                data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
                pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
                if (pad > IB_MGMT_RMPP_DATA || pad < 0)
                        pad = 0;
        }

        return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

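/*
 * Finish a fully reassembled transaction: ACK the last segment, cancel
 * the inactivity timer (if it was armed), and keep the completed entry
 * around briefly so retransmitted segments are re-ACKed instead of
 * restarting the transfer.
 */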
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_mad_recv_wc *rmpp_wc;

        ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
        if (rmpp_recv->seg_num > 1)
                cancel_delayed_work(&rmpp_recv->timeout_work);

        rmpp_wc = rmpp_recv->rmpp_wc;
        rmpp_wc->mad_len = get_mad_len(rmpp_recv);
        /* 10 seconds until we can find the packet lifetime */
        queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
                           &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
        return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_recv_buf *prev_buf;
        struct ib_mad_recv_wc *done_wc;
        int seg_num;
        unsigned long flags;

        rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv)
                goto drop1;

        seg_num = get_seg_num(&mad_recv_wc->recv_buf);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
            (seg_num > rmpp_recv->newwin))
                goto drop3;

        if ((seg_num <= rmpp_recv->last_ack) ||
            (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
                spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                ack_recv(rmpp_recv, mad_recv_wc);
                goto drop2;
        }

        prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
        if (!prev_buf)
                goto drop3;

        done_wc = NULL;
        list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
        if (rmpp_recv->cur_seg_buf == prev_buf) {
                update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
                if (get_last_flag(rmpp_recv->cur_seg_buf)) {
                        rmpp_recv->state = RMPP_STATE_COMPLETE;
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        done_wc = complete_rmpp(rmpp_recv);
                        goto out;
                } else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
                        rmpp_recv->newwin += window_size(agent);
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        ack_recv(rmpp_recv, mad_recv_wc);
                        goto out;
                }
        }
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
        deref_rmpp_recv(rmpp_recv);
        return done_wc;

drop3:  spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:  deref_rmpp_recv(rmpp_recv);
drop1:  ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

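/*
 * Handle the first segment of a transfer.  If an entry for the same
 * sender/TID already exists, this segment is a duplicate first MAD and
 * is fed through continue_rmpp() instead.  A single-segment transfer
 * completes immediately; otherwise the inactivity timer is armed and
 * the receive window is opened.
 */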
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
           struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv) {
                ib_free_recv_mad(mad_recv_wc);
                return NULL;
        }

        spin_lock_irqsave(&agent->lock, flags);
        if (insert_rmpp_recv(agent, rmpp_recv)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* duplicate first MAD */
                destroy_rmpp_recv(rmpp_recv);
                return continue_rmpp(agent, mad_recv_wc);
        }
        atomic_inc(&rmpp_recv->refcount);

        if (get_last_flag(&mad_recv_wc->recv_buf)) {
                rmpp_recv->state = RMPP_STATE_COMPLETE;
                spin_unlock_irqrestore(&agent->lock, flags);
                complete_rmpp(rmpp_recv);
        } else {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* 40 seconds until we can find the packet lifetimes */
                queue_delayed_work(agent->qp_info->port_priv->wq,
                                   &rmpp_recv->timeout_work,
                                   msecs_to_jiffies(40000));
                rmpp_recv->newwin += window_size(agent);
                ack_recv(rmpp_recv, mad_recv_wc);
                mad_recv_wc = NULL;
        }
        deref_rmpp_recv(rmpp_recv);
        return mad_recv_wc;
}

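/*
 * Transmit the next segment of an outbound RMPP MAD.  The first
 * segment advertises the total payload length in paylen_newwin, the
 * last segment advertises the residual length, and intermediate
 * segments carry zero; the FIRST/LAST flags are set to match.  A short
 * ACK timeout is also armed for the segment.
 */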
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int timeout;
        u32 paylen = 0;

        rmpp_mad = mad_send_wr->send_buf.mad;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

        if (mad_send_wr->seg_num == 1) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
                paylen = (mad_send_wr->send_buf.seg_count *
                          mad_send_wr->send_buf.seg_rmpp_size) -
                          mad_send_wr->pad;
        }

        if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
                paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
        }
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

        /* 2 seconds for an ACK until we can find the packet lifetime */
        timeout = mad_send_wr->send_buf.timeout_ms;
        if (!timeout || timeout > 2000)
                mad_send_wr->timeout = msecs_to_jiffies(2000);

        return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc wc;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr)
                goto out;       /* Unmatched send */

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        ib_mark_mad_done(mad_send_wr);
        spin_unlock_irqrestore(&agent->lock, flags);

        wc.status = IB_WC_REM_ABORT_ERR;
        wc.vendor_err = rmpp_status;
        wc.send_buf = &mad_send_wr->send_buf;
        ib_mad_complete_send_wr(mad_send_wr, &wc);
        return;
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
                                   int seg_num)
{
        struct list_head *list;

        wr->last_ack = seg_num;
        list = &wr->last_ack_seg->list;
        list_for_each_entry(wr->last_ack_seg, list, list)
                if (wr->last_ack_seg->num == seg_num)
                        break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
                           struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
        struct mad_rmpp_recv *rmpp_recv;

        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
                rmpp_recv->repwin = newwin;
}

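/*
 * Sender-side ACK processing: validate seg_num against the advertised
 * window, slide last_ack forward, and then either complete the send,
 * rearm its response timeout, or transmit further segments as the new
 * window allows.
 */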
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_rmpp_mad *rmpp_mad;
        unsigned long flags;
        int seg_num, newwin, ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (rmpp_mad->rmpp_hdr.rmpp_status) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                return;
        }

        seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
        newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (newwin < seg_num) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                return;
        }

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr) {
                if (!seg_num)
                        process_ds_ack(agent, mad_recv_wc, newwin);
                goto out;       /* Unmatched or DS RMPP ACK */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
            (mad_send_wr->timeout)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;         /* Repeated ACK for DS RMPP transaction */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        if (seg_num > mad_send_wr->send_buf.seg_count ||
            seg_num > mad_send_wr->newwin) {
                spin_unlock_irqrestore(&agent->lock, flags);
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                return;
        }

        if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
                goto out;       /* Old ACK */

        if (seg_num > mad_send_wr->last_ack) {
                adjust_last_ack(mad_send_wr, seg_num);
                mad_send_wr->retries_left = mad_send_wr->max_retries;
        }
        mad_send_wr->newwin = newwin;
        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                /* If no response is expected, the ACK completes the send */
                if (!mad_send_wr->send_buf.timeout_ms) {
                        struct ib_mad_send_wc wc;

                        ib_mark_mad_done(mad_send_wr);
                        spin_unlock_irqrestore(&agent->lock, flags);

                        wc.status = IB_WC_SUCCESS;
                        wc.vendor_err = 0;
                        wc.send_buf = &mad_send_wr->send_buf;
                        ib_mad_complete_send_wr(mad_send_wr, &wc);
                        return;
                }
                if (mad_send_wr->refcount == 1)
                        ib_reset_mad_timeout(mad_send_wr,
                                             mad_send_wr->send_buf.timeout_ms);
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
                   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
                /* Send failure will just result in a timeout/retry */
                ret = send_next_seg(mad_send_wr);
                if (ret)
                        goto out;

                mad_send_wr->refcount++;
                list_move_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

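/*
 * Dispatch a DATA segment: segment 1 (and only segment 1) must carry
 * the FIRST flag; anything else is nacked as a bad segment.
 */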
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_hdr *rmpp_hdr;
        u8 rmpp_status;

        rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

        if (rmpp_hdr->rmpp_status) {
                rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
                goto bad;
        }

        if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
                if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return start_rmpp(agent, mad_recv_wc);
        } else {
                if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return continue_rmpp(agent, mad_recv_wc);
        }
bad:
        nack_recv(agent, mad_recv_wc, rmpp_status);
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
                              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
                               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
            rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

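/*
 * Entry point from the MAD receive path.  Non-RMPP MADs pass straight
 * through to the caller; RMPP MADs are consumed here, and a work
 * completion is handed back only once a transfer has been fully
 * reassembled.
 */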
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
                        struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
                return mad_recv_wc;

        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                goto out;
        }

        switch (rmpp_mad->rmpp_hdr.rmpp_type) {
        case IB_MGMT_RMPP_TYPE_DATA:
                return process_rmpp_data(agent, mad_recv_wc);
        case IB_MGMT_RMPP_TYPE_ACK:
                process_rmpp_ack(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_STOP:
                process_rmpp_stop(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_ABORT:
                process_rmpp_abort(agent, mad_recv_wc);
                break;
        default:
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                break;
        }
out:
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

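/*
 * When sending the response half of a dual-sided transfer, open with
 * the window the requester advertised in its DS ACK (saved in repwin
 * by process_ds_ack()); otherwise start with a window of one segment.
 */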
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
        struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
        struct mad_rmpp_recv *rmpp_recv;
        struct rdma_ah_attr ah_attr;
        unsigned long flags;
        int newwin = 1;

        if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
                goto out;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid != mad_hdr->tid ||
                    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
                    rmpp_recv->class_version != mad_hdr->class_version ||
                    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
                        continue;

                if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
                        continue;

                if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
                        newwin = rmpp_recv->repwin;
                        break;
                }
        }
        spin_unlock_irqrestore(&agent->lock, flags);
out:
        return newwin;
}

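/*
 * Hook for every outbound MAD.  Non-RMPP sends are left to the caller
 * (IB_RMPP_RESULT_UNHANDLED); for RMPP data the first segment is
 * queued here and the ACK/timeout machinery paces the remainder.
 */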
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED;

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
                mad_send_wr->seg_num = 1;
                return IB_RMPP_RESULT_INTERNAL;
        }

        mad_send_wr->newwin = init_newwin(mad_send_wr);

        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
        ret = send_next_seg(mad_send_wr);
        if (!ret)
                return IB_RMPP_RESULT_CONSUMED;
        return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
                return IB_RMPP_RESULT_INTERNAL;  /* ACK, STOP, or ABORT */

        if (mad_send_wc->status != IB_WC_SUCCESS ||
            mad_send_wr->status != IB_WC_SUCCESS)
                return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

        if (!mad_send_wr->timeout)
                return IB_RMPP_RESULT_PROCESSED; /* Response received */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                mad_send_wr->timeout =
                        msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
                return IB_RMPP_RESULT_PROCESSED; /* Send done */
        }

        if (mad_send_wr->seg_num == mad_send_wr->newwin ||
            mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

        ret = send_next_seg(mad_send_wr);
        if (ret) {
                mad_send_wc->status = IB_WC_GENERAL_ERR;
                return IB_RMPP_RESULT_PROCESSED;
        }
        return IB_RMPP_RESULT_CONSUMED;
}

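/*
 * On a send timeout, rewind to the last acknowledged segment and
 * resend from the segment after it.
 */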
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED;

        mad_send_wr->seg_num = mad_send_wr->last_ack;
        mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

        ret = send_next_seg(mad_send_wr);
        if (ret)
                return IB_RMPP_RESULT_PROCESSED;

        return IB_RMPP_RESULT_CONSUMED;
}