linux/net/sctp/chunk.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2003, 2004
 *
 * This file is part of the SCTP kernel implementation
 *
 * This file contains the code relating the chunk abstraction.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* This file is mostly in anticipation of future work, but initially
 * populate with fragment tracking for an outbound message.
 */

/* Initialize datamsg from memory. */
static void sctp_datamsg_init(struct sctp_datamsg *msg)
{
        atomic_set(&msg->refcnt, 1);
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_abandon = 0;
        msg->can_delay = 1;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
}

/* Allocate and initialize datamsg. */
static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
{
        struct sctp_datamsg *msg;
        msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
        if (msg) {
                sctp_datamsg_init(msg);
                SCTP_DBG_OBJCNT_INC(datamsg);
        }
        return msg;
}

/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
        struct list_head *pos, *temp;
        struct sctp_chunk *chunk;
        struct sctp_sock *sp;
        struct sctp_ulpevent *ev;
        struct sctp_association *asoc = NULL;
        int error = 0, notify;

        /* If we failed, we may need to notify. */
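        /* Note (added for clarity, not in the original source): notify < 0
         * means we have not yet checked whether the socket subscribed to
         * SCTP_SEND_FAILED events, 0 means no event should be generated,
         * and > 0 means one event per chunk.
         */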
        notify = msg->send_failed ? -1 : 0;

        /* Release all references. */
        list_for_each_safe(pos, temp, &msg->chunks) {
                list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);
                /* Check whether we _really_ need to notify. */
                if (notify < 0) {
                        asoc = chunk->asoc;
                        if (msg->send_error)
                                error = msg->send_error;
                        else
                                error = asoc->outqueue.error;

                        sp = sctp_sk(asoc->base.sk);
                        notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
                                                            &sp->subscribe);
                }

                /* Generate a SEND FAILED event only if enabled. */
                if (notify > 0) {
                        int sent;
                        if (chunk->has_tsn)
                                sent = SCTP_DATA_SENT;
                        else
                                sent = SCTP_DATA_UNSENT;

                        ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
                                                            error, GFP_ATOMIC);
                        if (ev)
                                sctp_ulpq_tail_event(&asoc->ulpq, ev);
                }

                sctp_chunk_put(chunk);
        }

        SCTP_DBG_OBJCNT_DEC(datamsg);
        kfree(msg);
}

/* Hold a reference. */
static void sctp_datamsg_hold(struct sctp_datamsg *msg)
{
        atomic_inc(&msg->refcnt);
}

/* Release a reference. */
void sctp_datamsg_put(struct sctp_datamsg *msg)
{
        if (atomic_dec_and_test(&msg->refcnt))
                sctp_datamsg_destroy(msg);
}

/* Assign a chunk to this datamsg. */
static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
{
        sctp_datamsg_hold(msg);
        chunk->msg = msg;
}


/* A data chunk can have a maximum payload of (2^16 - 20).  Break
 * down any such message into smaller chunks.  Opportunistically, fragment
 * the chunks down to the current MTU constraints.  We may get refragmented
 * later if the PMTU changes, but it is _much better_ to fragment immediately
 * with a reasonable guess than always doing our fragmentation on the
 * soft-interrupt.
 */
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                                            struct sctp_sndrcvinfo *sinfo,
                                            struct iov_iter *from)
{
        int max, whole, i, offset, over, err;
        int len, first_len;
        int max_data;
        struct sctp_chunk *chunk;
        struct sctp_datamsg *msg;
        struct list_head *pos, *temp;
        size_t msg_len = iov_iter_count(from);
        __u8 frag;

        msg = sctp_datamsg_new(GFP_KERNEL);
        if (!msg)
                return ERR_PTR(-ENOMEM);

        /* Note: Calculate this outside of the loop, so that all fragments
         * have the same expiration.
         */
        if (sinfo->sinfo_timetolive) {
                /* sinfo_timetolive is in milliseconds */
                msg->expires_at = jiffies +
                                    msecs_to_jiffies(sinfo->sinfo_timetolive);
                msg->can_abandon = 1;

                pr_debug("%s: msg:%p expires_at:%ld jiffies:%ld\n", __func__,
                         msg, msg->expires_at, jiffies);
        }

        if (asoc->peer.prsctp_capable &&
            SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
                msg->expires_at =
                        jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);

        /* This is the biggest possible DATA chunk that can fit into
         * the packet
         */
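        /* Illustrative example (not in the original source): on an IPv4 path
         * with a 1500-byte path MTU this works out to 1500 - 20 (IP header)
         * - 12 (SCTP common header) - 16 (DATA chunk header) = 1452 bytes,
         * already a multiple of 4, so the & ~3 truncation below leaves it
         * unchanged.
         */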
        max_data = (asoc->pathmtu -
                sctp_sk(asoc->base.sk)->pf->af->net_header_len -
                sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3;

        max = asoc->frag_point;
        /* If the peer requested that we authenticate DATA chunks
         * we need to account for bundling of the AUTH chunks along with
         * DATA.
         */
        if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
                struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);

                if (hmac_desc)
                        max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
                                            hmac_desc->hmac_len);
        }

        /* Now, check if we need to reduce our max */
        if (max > max_data)
                max = max_data;

        whole = 0;
        first_len = max;

        /* Check to see if we have a pending SACK and try to let it be bundled
         * with this message.  Do this if we don't have any data queued already.
         * To check that, look at out_qlen and retransmit list.
         * NOTE: we will not reduce to account for SACK, if the message would
         * not have been fragmented.
         */
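        /* Note (added for clarity, not in the original source): the reserve
         * below is the size of a minimal SACK chunk, i.e. one carrying no
         * GAP ACK blocks and no duplicate TSNs, rounded up to a 4-byte
         * boundary by WORD_ROUND().
         */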
        if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
            asoc->outqueue.out_qlen == 0 &&
            list_empty(&asoc->outqueue.retransmit) &&
            msg_len > max)
                max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));

        /* Encourage Cookie-ECHO bundling: leave room so that early DATA can
         * be bundled into the same packet as the COOKIE ECHO chunk.
         */
        if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
                max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;

        /* Now that we adjusted completely, reset first_len */
        if (first_len > max_data)
                first_len = max_data;

        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg_len -= first_len;
                whole = 1;
                msg->can_delay = 0;
        }

        /* How many full sized?  How many bytes leftover? */
        whole += msg_len / max;
        over = msg_len % max;
        offset = 0;
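
        /* Worked example (added for illustration, not in the original
         * source): with max == first_len == 1452 and a 4000-byte user
         * message, the first fragment consumes 1452 bytes (whole = 1,
         * msg_len = 2548), the division above adds one more full-sized
         * fragment (whole = 2), and over = 2548 % 1452 = 1096 bytes are
         * left for the final fragment: 1452 + 1452 + 1096 = 4000.
         */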

        if ((whole > 1) || (whole && over))
                SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);

        /* Create chunks for all the full sized DATA chunks. */
        for (i = 0, len = first_len; i < whole; i++) {
                frag = SCTP_DATA_MIDDLE_FRAG;

                if (0 == i)
                        frag |= SCTP_DATA_FIRST_FRAG;

                if ((i == (whole - 1)) && !over) {
                        frag |= SCTP_DATA_LAST_FRAG;

                        /* The application requests to set the I-bit of the
                         * last DATA chunk of a user message when providing
                         * the user message to the SCTP implementation.
                         */
                        if ((sinfo->sinfo_flags & SCTP_EOF) ||
                            (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
                                frag |= SCTP_DATA_SACK_IMM;
                }

                chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
                                                 0, GFP_KERNEL);

                if (!chunk) {
                        err = -ENOMEM;
                        goto errout;
                }

                err = sctp_user_addto_chunk(chunk, len, from);
                if (err < 0)
                        goto errout_chunk_free;

                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
                           - (__u8 *)chunk->skb->data);

                sctp_datamsg_assign(msg, chunk);
                list_add_tail(&chunk->frag_list, &msg->chunks);

                /* The first chunk was likely short to allow bundling,
                 * so reset to full size for the remaining chunks.
                 */
                if (0 == i)
                        len = max;
        }

        /* .. now the leftover bytes. */
        if (over) {
                if (!whole)
                        frag = SCTP_DATA_NOT_FRAG;
                else
                        frag = SCTP_DATA_LAST_FRAG;

                if ((sinfo->sinfo_flags & SCTP_EOF) ||
                    (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
                        frag |= SCTP_DATA_SACK_IMM;

                chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag,
                                                 0, GFP_KERNEL);

                if (!chunk) {
                        err = -ENOMEM;
                        goto errout;
                }

                err = sctp_user_addto_chunk(chunk, over, from);

                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
                           - (__u8 *)chunk->skb->data);
                if (err < 0)
                        goto errout_chunk_free;

                sctp_datamsg_assign(msg, chunk);
                list_add_tail(&chunk->frag_list, &msg->chunks);
        }

        return msg;

errout_chunk_free:
        sctp_chunk_free(chunk);

errout:
        list_for_each_safe(pos, temp, &msg->chunks) {
                list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);
                sctp_chunk_free(chunk);
        }
        sctp_datamsg_put(msg);
        return ERR_PTR(err);
}

/* Check whether this message has expired. */
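/* Note (added for clarity, not in the original source): under PR-SCTP the
 * sinfo_flags select the partial-reliability policy.  With the TTL policy,
 * expires_at (derived from sinfo_timetolive, in milliseconds) bounds the
 * message lifetime; with the RTX policy, sinfo_timetolive is reused as a
 * limit on how many times a chunk may be transmitted (sent_count).
 */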
int sctp_chunk_abandoned(struct sctp_chunk *chunk)
{
        if (!chunk->asoc->peer.prsctp_capable ||
            !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {
                struct sctp_datamsg *msg = chunk->msg;

                if (!msg->can_abandon)
                        return 0;

                if (time_after(jiffies, msg->expires_at))
                        return 1;

                return 0;
        }

        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                if (chunk->sent_count)
                        chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
                else
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                return 1;
        }
        /* PRIO policy is processed by sendmsg, not here */

        return 0;
}

/* This chunk (and consequently entire message) has failed in its sending. */
void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
{
        chunk->msg->send_failed = 1;
        chunk->msg->send_error = error;
}
