linux/net/sctp/chunk.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2003, 2004
 *
 * This file is part of the SCTP kernel implementation
 *
 * This file contains the code relating to the chunk abstraction.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* This file is mostly in anticipation of future work, but it is initially
 * populated with fragment tracking for an outbound message.
 */

/* Initialize datamsg from memory. */
static void sctp_datamsg_init(struct sctp_datamsg *msg)
{
        refcount_set(&msg->refcnt, 1);
        msg->send_failed = 0;
        msg->send_error = 0;
        msg->can_delay = 1;
        msg->abandoned = 0;
        msg->expires_at = 0;
        INIT_LIST_HEAD(&msg->chunks);
}

/* Allocate and initialize datamsg. */
static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
{
        struct sctp_datamsg *msg;
        msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
        if (msg) {
                sctp_datamsg_init(msg);
                SCTP_DBG_OBJCNT_INC(datamsg);
        }
        return msg;
}

void sctp_datamsg_free(struct sctp_datamsg *msg)
{
        struct sctp_chunk *chunk;

        /* This doesn't have to be a _safe variant because
         * sctp_chunk_free() only drops the refs.
         */
        list_for_each_entry(chunk, &msg->chunks, frag_list)
                sctp_chunk_free(chunk);

        sctp_datamsg_put(msg);
}

/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
        struct sctp_association *asoc = NULL;
        struct list_head *pos, *temp;
        struct sctp_chunk *chunk;
        struct sctp_ulpevent *ev;
        int error, sent;

        /* Release all references. */
        list_for_each_safe(pos, temp, &msg->chunks) {
                list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);

                if (!msg->send_failed) {
                        sctp_chunk_put(chunk);
                        continue;
                }

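                /* The message failed to send: if the user subscribed to
                 * them, queue SCTP_SEND_FAILED and/or SCTP_SEND_FAILED_EVENT
                 * notifications reporting whether this chunk ever had a TSN
                 * assigned (SCTP_DATA_SENT) or not (SCTP_DATA_UNSENT).
                 */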
                asoc = chunk->asoc;
                error = msg->send_error ?: asoc->outqueue.error;
                sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;

                if (sctp_ulpevent_type_enabled(asoc->subscribe,
                                               SCTP_SEND_FAILED)) {
                        ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
                                                            error, GFP_ATOMIC);
                        if (ev)
                                asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
                }

                if (sctp_ulpevent_type_enabled(asoc->subscribe,
                                               SCTP_SEND_FAILED_EVENT)) {
                        ev = sctp_ulpevent_make_send_failed_event(asoc, chunk,
                                                                  sent, error,
                                                                  GFP_ATOMIC);
                        if (ev)
                                asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
                }

                sctp_chunk_put(chunk);
        }

        SCTP_DBG_OBJCNT_DEC(datamsg);
        kfree(msg);
}

/* Hold a reference. */
static void sctp_datamsg_hold(struct sctp_datamsg *msg)
{
        refcount_inc(&msg->refcnt);
}

/* Release a reference. */
void sctp_datamsg_put(struct sctp_datamsg *msg)
{
        if (refcount_dec_and_test(&msg->refcnt))
                sctp_datamsg_destroy(msg);
}

/* Assign a chunk to this datamsg. */
static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
{
        sctp_datamsg_hold(msg);
        chunk->msg = msg;
}


/* A data chunk can have a maximum payload of (2^16 - 20).  Break
 * down any such message into smaller chunks.  Opportunistically, fragment
 * the chunks down to the current MTU constraints.  We may get refragmented
 * later if the PMTU changes, but it is _much better_ to fragment immediately
 * with a reasonable guess than always doing our fragmentation on the
 * soft-interrupt.
 */
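/* On success the returned datamsg carries the fragment list in ->chunks,
 * with each fragment holding a reference on the datamsg; on failure all
 * fragments created so far are freed and an ERR_PTR() is returned.
 */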
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                                            struct sctp_sndrcvinfo *sinfo,
                                            struct iov_iter *from)
{
        size_t len, first_len, max_data, remaining;
        size_t msg_len = iov_iter_count(from);
        struct sctp_shared_key *shkey = NULL;
        struct list_head *pos, *temp;
        struct sctp_chunk *chunk;
        struct sctp_datamsg *msg;
        int err;

        msg = sctp_datamsg_new(GFP_KERNEL);
        if (!msg)
                return ERR_PTR(-ENOMEM);

        /* Note: Calculate this outside of the loop, so that all fragments
         * have the same expiration.
         */
        if (asoc->peer.prsctp_capable && sinfo->sinfo_timetolive &&
            (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags) ||
             !SCTP_PR_POLICY(sinfo->sinfo_flags)))
                msg->expires_at = jiffies +
                                  msecs_to_jiffies(sinfo->sinfo_timetolive);

        /* This is the biggest possible DATA chunk that can fit into
         * the packet
         */
        max_data = asoc->frag_point;
        if (unlikely(!max_data)) {
                max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
                                               sctp_datachk_len(&asoc->stream));
                pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
                                    __func__, asoc, max_data);
        }

        /* If the peer requested that we authenticate DATA chunks
         * we need to account for bundling of the AUTH chunks along with
         * DATA.
         */
        if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
                struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);

                if (hmac_desc)
                        max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) +
                                              hmac_desc->hmac_len);

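                /* sinfo_tsn/sinfo_ssn are reused by the sendmsg() path to
                 * pass down an explicit authentication key id (SCTP_AUTHINFO).
                 * If one was given and it is not the active key, look up that
                 * shared key; otherwise use the association's current key.
                 */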
                if (sinfo->sinfo_tsn &&
                    sinfo->sinfo_ssn != asoc->active_key_id) {
                        shkey = sctp_auth_get_shkey(asoc, sinfo->sinfo_ssn);
                        if (!shkey) {
                                err = -EINVAL;
                                goto errout;
                        }
                } else {
                        shkey = asoc->shkey;
                }
        }

        /* Set first_len and then account for possible bundles on first frag */
        first_len = max_data;

        /* Check to see if we have a pending SACK and try to let it be bundled
         * with this message.  Do this if we don't have any data queued already.
         * To check that, look at out_qlen and retransmit list.
         * NOTE: we will not reduce to account for SACK, if the message would
         * not have been fragmented.
         */
        if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
            asoc->outqueue.out_qlen == 0 &&
            list_empty(&asoc->outqueue.retransmit) &&
            msg_len > max_data)
                first_len -= SCTP_PAD4(sizeof(struct sctp_sack_chunk));

        /* Encourage Cookie-ECHO bundling. */
        if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
                first_len -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;

        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg->can_delay = 0;
                if (msg_len > first_len)
                        SCTP_INC_STATS(asoc->base.net,
                                       SCTP_MIB_FRAGUSRMSGS);
        } else {
                /* Which may be the only one... */
                first_len = msg_len;
        }

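        /* For example, with max_data (and first_len) at 1452 bytes, a
         * 4000 byte message becomes three fragments of 1452, 1452 and
         * 1096 bytes below; the first fragment shrinks further if room
         * was reserved above for a bundled SACK or COOKIE ECHO.
         */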
        /* Create chunks for all DATA chunks. */
        for (remaining = msg_len; remaining; remaining -= len) {
                u8 frag = SCTP_DATA_MIDDLE_FRAG;

                if (remaining == msg_len) {
                        /* First frag, which may also be the last */
                        frag |= SCTP_DATA_FIRST_FRAG;
                        len = first_len;
                } else {
                        /* Middle frags */
                        len = max_data;
                }

                if (len >= remaining) {
                        /* Last frag, which may also be the first */
                        len = remaining;
                        frag |= SCTP_DATA_LAST_FRAG;

                        /* The application requests to set the I-bit of the
                         * last DATA chunk of a user message when providing
                         * the user message to the SCTP implementation.
                         */
                        if ((sinfo->sinfo_flags & SCTP_EOF) ||
                            (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
                                frag |= SCTP_DATA_SACK_IMM;
                }

                chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
                                                       GFP_KERNEL);
                if (!chunk) {
                        err = -ENOMEM;
                        goto errout;
                }

                err = sctp_user_addto_chunk(chunk, len, from);
                if (err < 0)
                        goto errout_chunk_free;

                chunk->shkey = shkey;

                /* Put the chunk->skb back into the form expected by send.  */
                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -
                                       chunk->skb->data);

                sctp_datamsg_assign(msg, chunk);
                list_add_tail(&chunk->frag_list, &msg->chunks);
        }

        return msg;

errout_chunk_free:
        sctp_chunk_free(chunk);

errout:
        list_for_each_safe(pos, temp, &msg->chunks) {
                list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);
                sctp_chunk_free(chunk);
        }
        sctp_datamsg_put(msg);

        return ERR_PTR(err);
}

/* Check whether this message has expired. */
int sctp_chunk_abandoned(struct sctp_chunk *chunk)
{
        if (!chunk->asoc->peer.prsctp_capable)
                return 0;

        if (chunk->msg->abandoned)
                return 1;

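        /* For a message that has not been sent yet, only its first fragment
         * is evaluated below; the remaining fragments are abandoned via the
         * msg->abandoned flag once the first one has been given up on.
         */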
        if (!chunk->has_tsn &&
            !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
                return 0;

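        /* PR-SCTP policies: TTL abandons the message once expires_at has
         * passed, RTX abandons it once the chunk has been transmitted more
         * than sinfo_timetolive times (the field carries the retransmission
         * limit for this policy), and a plain timetolive with no PR policy
         * expires the message without touching the PR-SCTP counters.
         */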
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
                struct sctp_stream_out *streamout =
                        SCTP_SO(&chunk->asoc->stream,
                                chunk->sinfo.sinfo_stream);

                if (chunk->sent_count) {
                        chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
                        streamout->ext->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
                } else {
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                        streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
                }
                chunk->msg->abandoned = 1;
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
                struct sctp_stream_out *streamout =
                        SCTP_SO(&chunk->asoc->stream,
                                chunk->sinfo.sinfo_stream);

                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                chunk->msg->abandoned = 1;
                return 1;
        } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
                   chunk->msg->expires_at &&
                   time_after(jiffies, chunk->msg->expires_at)) {
                chunk->msg->abandoned = 1;
                return 1;
        }
        /* PRIO policy is processed by sendmsg, not here */

        return 0;
}

/* This chunk (and consequently entire message) has failed in its sending. */
void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
{
        chunk->msg->send_failed = 1;
        chunk->msg->send_error = error;
}