linux/drivers/infiniband/core/mad_rmpp.c
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

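/*
 * State of an in-progress RMPP receive: ACTIVE while segments are still
 * arriving, TIMEOUT/COMPLETE once the transfer has failed or finished,
 * and CANCELING while the owning agent is being torn down.
 */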
enum rmpp_state {
        RMPP_STATE_ACTIVE,
        RMPP_STATE_TIMEOUT,
        RMPP_STATE_COMPLETE,
        RMPP_STATE_CANCELING
};

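/*
 * Per-transfer receive context: reassembly state plus the
 * (tid, src_qp, slid, mgmt_class, class_version, method) tuple that
 * identifies the sender's transaction.
 */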
struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
        struct delayed_work timeout_work;
        struct delayed_work cleanup_work;
        struct completion comp;
        enum rmpp_state state;
        spinlock_t lock;
        atomic_t refcount;

        struct ib_ah *ah;
        struct ib_mad_recv_wc *rmpp_wc;
        struct ib_mad_recv_buf *cur_seg_buf;
        int last_ack;
        int seg_num;
        int newwin;
        int repwin;

        __be64 tid;
        u32 src_qp;
        u16 slid;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
};

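/*
 * A mad_rmpp_recv is freed only after every outstanding reference is
 * dropped: deref_rmpp_recv() fires the completion on the final put, and
 * destroy_rmpp_recv() drops its own reference, waits for that completion,
 * then releases the address handle and the structure itself.
 */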
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        if (atomic_dec_and_test(&rmpp_recv->refcount))
                complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        deref_rmpp_recv(rmpp_recv);
        wait_for_completion(&rmpp_recv->comp);
        ib_destroy_ah(rmpp_recv->ah);
        kfree(rmpp_recv);
}

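/*
 * Abort all in-progress receives for an agent that is going away: mark each
 * context CANCELING (freeing any partially reassembled MAD), cancel and
 * flush the timeout/cleanup work, then tear everything down.
 */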
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
        struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                        ib_free_recv_mad(rmpp_recv->rmpp_wc);
                rmpp_recv->state = RMPP_STATE_CANCELING;
        }
        spin_unlock_irqrestore(&agent->lock, flags);

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                cancel_delayed_work(&rmpp_recv->timeout_work);
                cancel_delayed_work(&rmpp_recv->cleanup_work);
        }

        flush_workqueue(agent->qp_info->port_priv->wq);

        list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                 &agent->rmpp_list, list) {
                list_del(&rmpp_recv->list);
                destroy_rmpp_recv(rmpp_recv);
        }
}

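/*
 * Build an RMPP ACK from the received MAD's headers: flip the response bit,
 * then advertise the highest in-order segment received (SegmentNumber) and
 * the current receive window (NewWindowLast).
 */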
static void format_ack(struct ib_mad_send_buf *msg,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *ack = msg->mad;
        unsigned long flags;

        memcpy(ack, &data->mad_hdr, msg->hdr_len);

        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        rmpp_recv->last_ack = rmpp_recv->seg_num;
        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

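/* Allocate, format, and post an ACK for the given receive. */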
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
                     struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        int ret, hdr_len;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1, hdr_len,
                                 0, GFP_KERNEL);
        if (IS_ERR(msg))
                return;

        format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
        msg->ah = rmpp_recv->ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
                                                  struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_ah *ah;
        int hdr_len;

        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
                                  recv_wc->recv_buf.grh, agent->port_num);
        if (IS_ERR(ah))
                return (void *) ah;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1,
                                 hdr_len, 0, GFP_KERNEL);
        if (IS_ERR(msg))
                ib_destroy_ah(ah);
        else {
                msg->ah = ah;
                msg->context[0] = ah;
        }

        return msg;
}

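/*
 * Acknowledge the ACK of a double-sided (DS) RMPP transfer: reply with a
 * zero segment number and a window of one so the peer may begin sending
 * its response.
 */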
static void ack_ds_ack(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                ib_destroy_ah(msg->ah);
                ib_free_send_mad(msg);
        }
}

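/*
 * Completion handler for the ACKs/ABORTs posted above: destroy the address
 * handle if we created it (context[0] marks ownership) and free the buffer.
 */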
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
                ib_destroy_ah(mad_send_wc->send_buf->ah);
        ib_free_send_mad(mad_send_wc->send_buf);
}

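/* Reject a receive by sending an RMPP ABORT carrying rmpp_status. */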
static void nack_recv(struct ib_mad_agent_private *agent,
                      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = 0;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                ib_destroy_ah(msg->ah);
                ib_free_send_mad(msg);
        }
}

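/*
 * A transfer stalled in RMPP_STATE_ACTIVE has timed out: remove it from the
 * agent's list, NACK the sender with status T2L (total time too long), and
 * release the context along with any segments accumulated so far.
 */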
static void recv_timeout_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, timeout_work.work);
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        rmpp_recv->state = RMPP_STATE_TIMEOUT;
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

        rmpp_wc = rmpp_recv->rmpp_wc;
        nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
        destroy_rmpp_recv(rmpp_recv);
        ib_free_recv_mad(rmpp_wc);
}

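/*
 * Retire a completed receive context once its cleanup delay expires;
 * CANCELING contexts are left for ib_cancel_rmpp_recvs() to destroy.
 */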
static void recv_cleanup_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, cleanup_work.work);
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state == RMPP_STATE_CANCELING) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
        destroy_rmpp_recv(rmpp_recv);
}

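/*
 * Allocate and initialize a receive context for a new transfer, keyed on
 * the first segment's headers. Starts with a window of one segment and a
 * single reference held by the caller.
 */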
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr;

        rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
        if (!rmpp_recv)
                return NULL;

        rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
                                             mad_recv_wc->wc,
                                             mad_recv_wc->recv_buf.grh,
                                             agent->agent.port_num);
        if (IS_ERR(rmpp_recv->ah))
                goto error;

        rmpp_recv->agent = agent;
        init_completion(&rmpp_recv->comp);
        INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
        INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);

        rmpp_recv->rmpp_wc = mad_recv_wc;
        rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;
        rmpp_recv->repwin = 1;

        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
        rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
        rmpp_recv->slid = mad_recv_wc->wc->slid;
        rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
        rmpp_recv->class_version = mad_hdr->class_version;
        rmpp_recv->method = mad_hdr->method;
        return rmpp_recv;

error:  kfree(rmpp_recv);
        return NULL;
}

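/*
 * Match an incoming MAD to its transfer by (tid, src QP, SLID, class,
 * version, method). Caller must hold agent->lock.
 */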
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid == mad_hdr->tid &&
                    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
                    rmpp_recv->slid == mad_recv_wc->wc->slid &&
                    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
                    rmpp_recv->class_version == mad_hdr->class_version &&
                    rmpp_recv->method == mad_hdr->method)
                        return rmpp_recv;
        }
        return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv)
                atomic_inc(&rmpp_recv->refcount);
        spin_unlock_irqrestore(&agent->lock, flags);
        return rmpp_recv;
}

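/*
 * Add a new context to the agent's list unless a matching transfer already
 * exists; returns the existing context on a duplicate first segment.
 */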
static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct mad_rmpp_recv *rmpp_recv)
{
        struct mad_rmpp_recv *cur_rmpp_recv;

        cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
        if (!cur_rmpp_recv)
                list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

        return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
                                                    struct ib_mad_recv_buf *seg)
{
        if (seg->list.next == rmpp_list)
                return NULL;

        return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

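/*
 * Receive window advertised to senders: one eighth of the receive queue
 * depth, but never less than one segment.
 */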
static inline int window_size(struct ib_mad_agent_private *agent)
{
        return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

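/*
 * Walk the segment list backwards to find the buffer that seg_num should
 * follow. Returns NULL if the segment is already present (a duplicate).
 */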
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
{
        struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;

        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
                cur_seg_num = get_seg_num(seg_buf);
                if (seg_num > cur_seg_num)
                        return seg_buf;
                if (seg_num == cur_seg_num)
                        break;
        }
        return NULL;
}

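/*
 * Advance cur_seg_buf/seg_num across any previously out-of-order segments
 * that have now become contiguous.
 */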
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
                           struct ib_mad_recv_buf *new_buf)
{
        struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

        while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
                rmpp_recv->cur_seg_buf = new_buf;
                rmpp_recv->seg_num++;
                new_buf = get_next_seg(rmpp_list, new_buf);
        }
}

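/*
 * Total reassembled length: header plus one full data payload per segment,
 * minus the pad implied by the last segment's PayloadLength field. An
 * out-of-range pad is treated as zero.
 */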
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *rmpp_mad;
        int hdr_size, data_size, pad;

        rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

        hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
        data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
        pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (pad > IB_MGMT_RMPP_DATA || pad < 0)
                pad = 0;

        return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

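/*
 * All segments have arrived: ACK the final segment, compute the full MAD
 * length, and schedule delayed cleanup so the context survives long enough
 * to re-ACK late duplicates.
 */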
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_mad_recv_wc *rmpp_wc;

        ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
        if (rmpp_recv->seg_num > 1)
                cancel_delayed_work(&rmpp_recv->timeout_work);

        rmpp_wc = rmpp_recv->rmpp_wc;
        rmpp_wc->mad_len = get_mad_len(rmpp_recv);
        /* 10 seconds until we can find the packet lifetime */
        queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
                           &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
        return rmpp_wc;
}

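/*
 * Handle a non-first data segment: drop it if it falls outside the window
 * or the transfer timed out, re-ACK duplicates, otherwise splice it into
 * the segment list. When it extends the in-order prefix, either complete
 * the transfer (LAST flag) or widen the window and ACK.
 */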
 462static struct ib_mad_recv_wc *
 463continue_rmpp(struct ib_mad_agent_private *agent,
 464              struct ib_mad_recv_wc *mad_recv_wc)
 465{
 466        struct mad_rmpp_recv *rmpp_recv;
 467        struct ib_mad_recv_buf *prev_buf;
 468        struct ib_mad_recv_wc *done_wc;
 469        int seg_num;
 470        unsigned long flags;
 471
 472        rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
 473        if (!rmpp_recv)
 474                goto drop1;
 475
 476        seg_num = get_seg_num(&mad_recv_wc->recv_buf);
 477
 478        spin_lock_irqsave(&rmpp_recv->lock, flags);
 479        if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
 480            (seg_num > rmpp_recv->newwin))
 481                goto drop3;
 482
 483        if ((seg_num <= rmpp_recv->last_ack) ||
 484            (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
 485                spin_unlock_irqrestore(&rmpp_recv->lock, flags);
 486                ack_recv(rmpp_recv, mad_recv_wc);
 487                goto drop2;
 488        }
 489
 490        prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
 491        if (!prev_buf)
 492                goto drop3;
 493
 494        done_wc = NULL;
 495        list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
 496        if (rmpp_recv->cur_seg_buf == prev_buf) {
 497                update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
 498                if (get_last_flag(rmpp_recv->cur_seg_buf)) {
 499                        rmpp_recv->state = RMPP_STATE_COMPLETE;
 500                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
 501                        done_wc = complete_rmpp(rmpp_recv);
 502                        goto out;
 503                } else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
 504                        rmpp_recv->newwin += window_size(agent);
 505                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
 506                        ack_recv(rmpp_recv, mad_recv_wc);
 507                        goto out;
 508                }
 509        }
 510        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
 511out:
 512        deref_rmpp_recv(rmpp_recv);
 513        return done_wc;
 514
 515drop3:  spin_unlock_irqrestore(&rmpp_recv->lock, flags);
 516drop2:  deref_rmpp_recv(rmpp_recv);
 517drop1:  ib_free_recv_mad(mad_recv_wc);
 518        return NULL;
 519}
 520
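/*
 * Handle the first segment of a transfer: create a context and register it
 * with the agent. A single-segment transfer completes immediately;
 * otherwise arm the overall timeout, open the window, and ACK the segment.
 */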
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
           struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv) {
                ib_free_recv_mad(mad_recv_wc);
                return NULL;
        }

        spin_lock_irqsave(&agent->lock, flags);
        if (insert_rmpp_recv(agent, rmpp_recv)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* duplicate first MAD */
                destroy_rmpp_recv(rmpp_recv);
                return continue_rmpp(agent, mad_recv_wc);
        }
        atomic_inc(&rmpp_recv->refcount);

        if (get_last_flag(&mad_recv_wc->recv_buf)) {
                rmpp_recv->state = RMPP_STATE_COMPLETE;
                spin_unlock_irqrestore(&agent->lock, flags);
                complete_rmpp(rmpp_recv);
        } else {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* 40 seconds until we can find the packet lifetimes */
                queue_delayed_work(agent->qp_info->port_priv->wq,
                                   &rmpp_recv->timeout_work,
                                   msecs_to_jiffies(40000));
                rmpp_recv->newwin += window_size(agent);
                ack_recv(rmpp_recv, mad_recv_wc);
                mad_recv_wc = NULL;
        }
        deref_rmpp_recv(rmpp_recv);
        return mad_recv_wc;
}

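/*
 * Transmit the sender's next segment: bump seg_num, set the FIRST/LAST
 * flags, and fill in PayloadLength (total payload on the first segment,
 * remaining payload on the last).
 */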
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int timeout;
        u32 paylen = 0;

        rmpp_mad = mad_send_wr->send_buf.mad;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

        if (mad_send_wr->seg_num == 1) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
                paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
                         mad_send_wr->pad;
        }

        if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
                paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
        }
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

        /* 2 seconds for an ACK until we can find the packet lifetime */
        timeout = mad_send_wr->send_buf.timeout_ms;
        if (!timeout || timeout > 2000)
                mad_send_wr->timeout = msecs_to_jiffies(2000);

        return ib_send_mad(mad_send_wr);
}

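/*
 * Terminate a matching outstanding send (e.g. after receiving STOP/ABORT)
 * and report it to the client as IB_WC_REM_ABORT_ERR with the RMPP status
 * in vendor_err.
 */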
static void abort_send(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc wc;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr)
                goto out;       /* Unmatched send */

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        ib_mark_mad_done(mad_send_wr);
        spin_unlock_irqrestore(&agent->lock, flags);

        wc.status = IB_WC_REM_ABORT_ERR;
        wc.vendor_err = rmpp_status;
        wc.send_buf = &mad_send_wr->send_buf;
        ib_mad_complete_send_wr(mad_send_wr, &wc);
        return;
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
                                   int seg_num)
{
        struct list_head *list;

        wr->last_ack = seg_num;
        list = &wr->last_ack_seg->list;
        list_for_each_entry(wr->last_ack_seg, list, list)
                if (wr->last_ack_seg->num == seg_num)
                        break;
}

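/*
 * Record the response window granted by a double-sided ACK so a later
 * response send (see init_newwin) can start with it.
 */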
static void process_ds_ack(struct ib_mad_agent_private *agent,
                           struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
        struct mad_rmpp_recv *rmpp_recv;

        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
                rmpp_recv->repwin = newwin;
}

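/*
 * Process an incoming ACK for one of our sends: validate the segment number
 * against the window, slide last_ack forward, complete the send once the
 * final segment is ACKed, or transmit more segments as the window allows.
 */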
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_rmpp_mad *rmpp_mad;
        unsigned long flags;
        int seg_num, newwin, ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (rmpp_mad->rmpp_hdr.rmpp_status) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                return;
        }

        seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
        newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (newwin < seg_num) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                return;
        }

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr) {
                if (!seg_num)
                        process_ds_ack(agent, mad_recv_wc, newwin);
                goto out;       /* Unmatched or DS RMPP ACK */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
            (mad_send_wr->timeout)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;         /* Repeated ACK for DS RMPP transaction */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        if (seg_num > mad_send_wr->send_buf.seg_count ||
            seg_num > mad_send_wr->newwin) {
                spin_unlock_irqrestore(&agent->lock, flags);
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                return;
        }

        if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
                goto out;       /* Old ACK */

        if (seg_num > mad_send_wr->last_ack) {
                adjust_last_ack(mad_send_wr, seg_num);
                mad_send_wr->retries_left = mad_send_wr->max_retries;
        }
        mad_send_wr->newwin = newwin;
        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                /* If no response is expected, the ACK completes the send */
                if (!mad_send_wr->send_buf.timeout_ms) {
                        struct ib_mad_send_wc wc;

                        ib_mark_mad_done(mad_send_wr);
                        spin_unlock_irqrestore(&agent->lock, flags);

                        wc.status = IB_WC_SUCCESS;
                        wc.vendor_err = 0;
                        wc.send_buf = &mad_send_wr->send_buf;
                        ib_mad_complete_send_wr(mad_send_wr, &wc);
                        return;
                }
                if (mad_send_wr->refcount == 1)
                        ib_reset_mad_timeout(mad_send_wr,
                                             mad_send_wr->send_buf.timeout_ms);
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
                   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
                /* Send failure will just result in a timeout/retry */
                ret = send_next_seg(mad_send_wr);
                if (ret)
                        goto out;

                mad_send_wr->refcount++;
                list_move_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

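/*
 * Dispatch a data segment: segment 1 must carry the FIRST flag and starts a
 * new transfer; any other segment must not, and continues an existing one.
 * Violations are NACKed with status BAD_SEG.
 */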
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_hdr *rmpp_hdr;
        u8 rmpp_status;

        rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

        if (rmpp_hdr->rmpp_status) {
                rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
                goto bad;
        }

        if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
                if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return start_rmpp(agent, mad_recv_wc);
        } else {
                if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return continue_rmpp(agent, mad_recv_wc);
        }
bad:
        nack_recv(agent, mad_recv_wc, rmpp_status);
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

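/*
 * STOP and ABORT both terminate our matching send. A status value outside
 * the range valid for the type is itself rejected as BAD_STATUS.
 */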
static void process_rmpp_stop(struct ib_mad_agent_private *agent,
                              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
                               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
            rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

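/*
 * Entry point from the MAD layer for received MADs. Non-RMPP MADs pass
 * straight through; RMPP MADs are demultiplexed by type. Returns a work
 * completion only when a fully reassembled MAD is ready for the client.
 */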
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
                        struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
                return mad_recv_wc;

        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                goto out;
        }

        switch (rmpp_mad->rmpp_hdr.rmpp_type) {
        case IB_MGMT_RMPP_TYPE_DATA:
                return process_rmpp_data(agent, mad_recv_wc);
        case IB_MGMT_RMPP_TYPE_ACK:
                process_rmpp_ack(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_STOP:
                process_rmpp_stop(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_ABORT:
                process_rmpp_abort(agent, mad_recv_wc);
                break;
        default:
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                break;
        }
out:
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

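/*
 * Initial send window for a new transfer. For a response, reuse the window
 * the peer granted via its DS ACK (matched by transaction, class, and
 * destination LID); otherwise start with a single segment.
 */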
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
        struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_ah_attr ah_attr;
        unsigned long flags;
        int newwin = 1;

        if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
                goto out;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid != mad_hdr->tid ||
                    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
                    rmpp_recv->class_version != mad_hdr->class_version ||
                    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
                        continue;

                if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
                        continue;

                if (rmpp_recv->slid == ah_attr.dlid) {
                        newwin = rmpp_recv->repwin;
                        break;
                }
        }
        spin_unlock_irqrestore(&agent->lock, flags);
out:
        return newwin;
}

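/*
 * Entry point for sending: non-RMPP MADs are left to the caller, non-DATA
 * RMPP types are sent unsegmented, and DATA MADs enter segmented
 * transmission starting with the first segment.
 */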
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED;

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
                mad_send_wr->seg_num = 1;
                return IB_RMPP_RESULT_INTERNAL;
        }

        mad_send_wr->newwin = init_newwin(mad_send_wr);

        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
        ret = send_next_seg(mad_send_wr);
        if (!ret)
                return IB_RMPP_RESULT_CONSUMED;
        return ret;
}

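/*
 * Send-completion hook: after each segment completes on the wire, decide
 * whether the transfer is done, must wait for an ACK at the window edge,
 * or should transmit the next segment immediately.
 */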
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
                return IB_RMPP_RESULT_INTERNAL;  /* ACK, STOP, or ABORT */

        if (mad_send_wc->status != IB_WC_SUCCESS ||
            mad_send_wr->status != IB_WC_SUCCESS)
                return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

        if (!mad_send_wr->timeout)
                return IB_RMPP_RESULT_PROCESSED; /* Response received */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                mad_send_wr->timeout =
                        msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
                return IB_RMPP_RESULT_PROCESSED; /* Send done */
        }

        if (mad_send_wr->seg_num == mad_send_wr->newwin ||
            mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

        ret = send_next_seg(mad_send_wr);
        if (ret) {
                mad_send_wc->status = IB_WC_GENERAL_ERR;
                return IB_RMPP_RESULT_PROCESSED;
        }
        return IB_RMPP_RESULT_CONSUMED;
}

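/*
 * Retry hook: rewind to the last acknowledged segment and resend from
 * there; segments past last_ack are assumed lost.
 */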
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED;

        mad_send_wr->seg_num = mad_send_wr->last_ack;
        mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

        ret = send_next_seg(mad_send_wr);
        if (ret)
                return IB_RMPP_RESULT_PROCESSED;

        return IB_RMPP_RESULT_CONSUMED;
}