/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#define MAX_FORWARD_SIZE 1024
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16

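/* align - round a value up to the next multiple of 4, the word
 * alignment used for TIPC message sizes
 */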
static unsigned int align(unsigned int i)
{
        return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
        struct sk_buff *skb;
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

        skb = alloc_skb_fclone(buf_size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
                skb->next = NULL;
        }
        return skb;
}

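/**
 * tipc_msg_init - initialize common fields of a TIPC message header
 * @own_node: identity of the originating node, set as previous node
 * @m: header to be initialized; zeroed before the fields are set
 * @user: message user type
 * @type: message type
 * @hsize: header size, also used as the initial message size
 * @dnode: destination node; origin/destination node fields are only
 *         set for headers larger than SHORT_H_SIZE
 */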
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 dnode)
{
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
        msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
                msg_set_orignode(m, own_node);
                msg_set_destnode(m, dnode);
        }
}

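/**
 * tipc_msg_create - allocate and initialize a complete message buffer
 * @user: message user type
 * @type: message type
 * @hdr_sz: header size
 * @data_sz: size of the user data area
 * @dnode: destination node
 * @onode: originating node
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code to be set in the header
 *
 * Returns a new buffer with an initialized header and room for @data_sz
 * bytes of user data, or NULL if allocation fails. The data area itself
 * is left for the caller to fill in.
 */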
struct sk_buff *tipc_msg_create(uint user, uint type,
                                uint hdr_sz, uint data_sz, u32 dnode,
                                u32 onode, u32 dport, u32 oport, int errcode)
{
        struct tipc_msg *msg;
        struct sk_buff *buf;

        buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        if (unlikely(!buf))
                return NULL;

        msg = buf_msg(buf);
        tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
        if (hdr_sz > SHORT_H_SIZE) {
                msg_set_orignode(msg, onode);
                msg_set_destnode(msg, dnode);
        }
        return buf;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
        bool headstolen;

        if (!frag)
                goto err;

        msg = buf_msg(frag);
        fragid = msg_type(msg);
        frag->next = NULL;
        skb_pull(frag, msg_hdr_sz(msg));

        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
                if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
                        goto err;
                head = *headbuf = frag;
                *buf = NULL;
                TIPC_SKB_CB(head)->tail = NULL;
                if (skb_is_nonlinear(head)) {
                        skb_walk_frags(head, tail) {
                                TIPC_SKB_CB(head)->tail = tail;
                        }
                } else {
                        skb_frag_list_init(head);
                }
                return 0;
        }

        if (!head)
                goto err;

        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
                tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                else
                        tail->next = frag;
                head->truesize += frag->truesize;
                head->data_len += frag->len;
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;
        }

        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = false;
                if (unlikely(!tipc_msg_validate(&head)))
                        goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
                *headbuf = NULL;
                return 1;
        }
        *buf = NULL;
        return 0;
err:
        kfree_skb(*buf);
        kfree_skb(*headbuf);
        *buf = *headbuf = NULL;
        return 0;
}

/* tipc_msg_validate - validate basic format of received message
 * @_skb: buffer to be validated; may be replaced if the buffer has to
 *        be copied to satisfy the flow control ratio condition
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
        struct sk_buff *skb = *_skb;
        struct tipc_msg *hdr;
        int msz, hsz;

        /* Ensure that flow control ratio condition is satisfied */
        if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
                skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
                if (!skb)
                        return false;
                kfree_skb(*_skb);
                *_skb = skb;
        }

        if (unlikely(TIPC_SKB_CB(skb)->validated))
                return true;
        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
                return false;

        hsz = msg_hdr_sz(buf_msg(skb));
        if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
                return false;
        if (unlikely(!pskb_may_pull(skb, hsz)))
                return false;

        hdr = buf_msg(skb);
        if (unlikely(msg_version(hdr) != TIPC_VERSION))
                return false;

        msz = msg_size(hdr);
        if (unlikely(msz < hsz))
                return false;
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
                return false;
        if (unlikely(skb->len < msz))
                return false;

        TIPC_SKB_CB(skb)->validated = true;
        return true;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                   int dsz, int pktmax, struct sk_buff_head *list)
{
        int mhsz = msg_hdr_sz(mhdr);
        struct tipc_msg pkthdr;
        int msz = mhsz + dsz;
        int pktrem = pktmax;
        struct sk_buff *skb;
        int drem = dsz;
        int pktno = 1;
        char *pktpos;
        int pktsz;
        int rc;

        msg_set_size(mhdr, msz);

        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
                skb = tipc_buf_acquire(msz, GFP_KERNEL);

                /* Fall back to smaller MTU if node local message */
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                                return -ENOMEM;
                        rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
                        if (rc != dsz)
                                return rc;
                        if (tipc_msg_assemble(list))
                                return dsz;
                        return -ENOMEM;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
                if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
                        return dsz;
                rc = -EFAULT;
                goto error;
        }

        /* Prepare reusable fragment header */
        tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
        msg_set_importance(&pkthdr, msg_importance(mhdr));

        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
        pktpos += mhsz;
        pktrem -= mhsz;

        do {
                if (drem < pktrem)
                        pktrem = drem;

                if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;

                if (!drem)
                        break;

                /* Prepare new fragment: */
                if (drem < (pktmax - INT_H_SIZE))
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
                skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                if (!skb) {
                        rc = -ENOMEM;
                        goto error;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
                pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;

        } while (1);
        msg_set_type(buf_msg(skb), LAST_FRAGMENT);
        return dsz;
error:
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        return rc;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @skb: the buffer to append to ("bundle")
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
{
        struct tipc_msg *bmsg;
        unsigned int bsz;
        unsigned int msz = msg_size(msg);
        u32 start, pad;
        u32 max = mtu - INT_H_SIZE;

        if (likely(msg_user(msg) == MSG_FRAGMENTER))
                return false;
        if (!skb)
                return false;
        bmsg = buf_msg(skb);
        bsz = msg_size(bmsg);
        start = align(bsz);
        pad = start - bsz;

        if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
                return false;
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
                return false;
        if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
                return false;
        if (unlikely(skb_tailroom(skb) < (pad + msz)))
                return false;
        if (unlikely(max < (start + msz)))
                return false;
        if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
            (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
                return false;

        skb_put(skb, pad + msz);
        skb_copy_to_linear_data_offset(skb, start, msg, msz);
        msg_set_size(bmsg, start + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
        return true;
}

/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
 *  @skb: buffer to be extracted from.
 *  @iskb: extracted inner buffer, to be returned
 *  @pos: position in outer message of msg to be extracted.
 *        Returns position of next msg
 *  Consumes outer buffer when last packet extracted
 *  Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
        struct tipc_msg *hdr, *ihdr;
        int imsz;

        *iskb = NULL;
        if (unlikely(skb_linearize(skb)))
                goto none;

        hdr = buf_msg(skb);
        if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
                goto none;

        ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
        imsz = msg_size(ihdr);

        if ((*pos + imsz) > msg_data_sz(hdr))
                goto none;

        *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
        if (!*iskb)
                goto none;

        skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;

        *pos += align(imsz);
        return true;
none:
        kfree_skb(skb);
        kfree_skb(*iskb);
        *iskb = NULL;
        return false;
}

/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
                          u32 mtu, u32 dnode)
{
        struct sk_buff *_skb;
        struct tipc_msg *bmsg;
        u32 msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;

        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
        if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
        if (msz > (max / 2))
                return false;

        _skb = tipc_buf_acquire(max, GFP_ATOMIC);
        if (!_skb)
                return false;

        skb_trim(_skb, INT_H_SIZE);
        bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
        if (msg_isdata(msg))
                msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
        else
                msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
        tipc_msg_bundle(_skb, msg, mtu);
        *skb = _skb;
        return true;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
        struct sk_buff *_skb = *skb;
        struct tipc_msg *_hdr, *hdr;
        int hlen, dlen;

        if (skb_linearize(_skb))
                goto exit;
        _hdr = buf_msg(_skb);
        dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
        hlen = msg_hdr_sz(_hdr);

        if (msg_dest_droppable(_hdr))
                goto exit;
        if (msg_errcode(_hdr))
                goto exit;

        /* Never return SHORT header */
        if (hlen == SHORT_H_SIZE)
                hlen = BASIC_H_SIZE;

        /* Don't return data along with SYN+, - sender has a clone */
        if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
                dlen = 0;

        /* Allocate new buffer to return */
        *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
        if (!*skb)
                goto exit;
        memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
        memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

        /* Build reverse header in new buffer */
        hdr = buf_msg(*skb);
        msg_set_hdr_sz(hdr, hlen);
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(_hdr));
        msg_set_destport(hdr, msg_origport(_hdr));
        msg_set_destnode(hdr, msg_prevnode(_hdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
        msg_set_size(hdr, hlen + dlen);
        skb_orphan(_skb);
        kfree_skb(_skb);
        return true;
exit:
        kfree_skb(_skb);
        *skb = NULL;
        return false;
}

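/* tipc_msg_skb_clone() - clone all buffers of a queue onto another queue
 * @msg: queue of buffers to be cloned
 * @cpy: queue to receive the clones
 * Purges @cpy and returns false if any clone fails, otherwise true
 */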
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        pr_err_ratelimited("Failed to clone buffer chain\n");
                        return false;
                }
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: the applicable net namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
        struct tipc_msg *msg = buf_msg(skb);
        u32 dport, dnode;
        u32 onode = tipc_own_addr(net);

        if (!msg_isdata(msg))
                return false;
        if (!msg_named(msg))
                return false;
        if (msg_errcode(msg))
                return false;
        *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
        msg = buf_msg(skb);
        if (msg_reroute_cnt(msg))
                return false;
        dnode = tipc_scope2node(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
                                       msg_nameinst(msg), &dnode);
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
        if (dnode != onode)
                msg_set_prevnode(msg, onode);
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
        return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp = NULL;

        if (skb_queue_len(list) == 1)
                return true;

        while ((skb = __skb_dequeue(list))) {
                skb->next = NULL;
                if (tipc_buf_append(&tmp, &skb)) {
                        __skb_queue_tail(list, skb);
                        return true;
                }
                if (!tmp)
                        break;
        }
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        pr_warn("Failed to assemble buffer\n");
        return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
        struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
        int hdr_len;

        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
                hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
                _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
                if (!_skb)
                        return false;
                __skb_queue_tail(rcvq, _skb);
                return true;
        }

        /* Clone all fragments and reassemble */
        skb_queue_walk(list, skb) {
                frag = skb_clone(skb, GFP_ATOMIC);
                if (!frag)
                        goto error;
                frag->next = NULL;
                if (tipc_buf_append(&head, &frag))
                        break;
                if (!head)
                        goto error;
        }
        __skb_queue_tail(rcvq, frag);
        return true;
error:
        pr_warn("Failed to clone local mcast rcv buffer\n");
        kfree_skb(head);
        return false;
}

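/* tipc_msg_pskb_copy() - copy all buffers of a queue for a new destination
 * @dst: destination node to be set in each copied header
 * @msg: queue of buffers to be copied
 * @cpy: queue to receive the copies
 * Purges @cpy and returns false if any copy fails, otherwise true
 */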
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = pskb_copy(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        return false;
                }
                msg_set_destnode(buf_msg(_skb), dst);
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to insert buffer into
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb)
{
        struct sk_buff *_skb, *tmp;

        if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
                __skb_queue_head(list, skb);
                return;
        }

        if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
                __skb_queue_tail(list, skb);
                return;
        }

        skb_queue_walk_safe(list, _skb, tmp) {
                if (more(seqno, buf_seqno(_skb)))
                        continue;
                if (seqno == buf_seqno(_skb))
                        break;
                __skb_queue_before(list, _skb, skb);
                return;
        }
        kfree_skb(skb);
}

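/* tipc_skb_reject() - reverse a message and queue it for transmission
 * @net: the applicable net namespace
 * @err: error code to be set in the rejected message
 * @skb: buffer containing the message to be rejected; always consumed
 * @xmitq: queue to receive the reversed message when reversal succeeds
 */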
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq)
{
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);
}