linux/net/ax25/ax25_out.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

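/* Serializes fragment buffer allocation in ax25_output() below. */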
static DEFINE_SPINLOCK(ax25_frag_lock);

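/*
 *      Send a frame to a destination, reusing an existing connection to
 *      the same source, destination, digipeater path and device if one
 *      exists, or creating a new control block and establishing the data
 *      link otherwise. Returns the control block with a reference held
 *      that the caller is expected to drop with ax25_cb_put(), or NULL
 *      on failure.
 */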
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
        ax25_dev *ax25_dev;
        ax25_cb *ax25;

        /*
         * Take the default packet length for the device if zero is
         * specified.
         */
        if (paclen == 0) {
                if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                        return NULL;

                paclen = ax25_dev->values[AX25_VALUES_PACLEN];
        }

        /*
         * Look for an existing connection.
         */
        if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
                ax25_output(ax25, paclen, skb);
                return ax25;            /* It already existed */
        }

        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return NULL;

        if ((ax25 = ax25_create_cb()) == NULL)
                return NULL;

        ax25_fillin_cb(ax25, ax25_dev);

        ax25->source_addr = *src;
        ax25->dest_addr   = *dest;

        if (digi != NULL) {
                ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
                if (ax25->digipeat == NULL) {
                        ax25_cb_put(ax25);
                        return NULL;
                }
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_std_establish_data_link(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        case AX25_PROTO_DAMA_SLAVE:
                if (ax25_dev->dama.slave)
                        ax25_ds_establish_data_link(ax25);
                else
                        ax25_std_establish_data_link(ax25);
                break;
#endif
        }

        /*
         * There is one ref for the state machine; a caller needs
         * one more to put it back, just like with the existing one.
         */
        ax25_cb_hold(ax25);

        ax25_cb_add(ax25);

        ax25->state = AX25_STATE_1;

        ax25_start_heartbeat(ax25);

        ax25_output(ax25, paclen, skb);

        return ax25;                    /* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 *      All outgoing AX.25 I frames pass via this routine. Therefore this is
 *      where the fragmentation of frames takes place. Frames whose payload
 *      exceeds paclen are split here; a paclen below the 16 byte minimum is
 *      rejected and the frame is dropped.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char *p;
        int frontlen, len, fragno, ka9qfrag, first = 1;

        if (paclen < 16) {
                WARN_ON_ONCE(1);
                kfree_skb(skb);
                return;
        }

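        /*
         * Oversize frames are fragmented below. Plain text frames
         * (AX25_P_TEXT) are simply split, each piece being queued with
         * the text PID again; anything else uses KA9Q style segmentation
         * (the ka9qfrag flag), where each piece gets an AX25_P_SEGMENT
         * PID and a one byte fragment counter.
         */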
        if ((skb->len - 1) > paclen) {
                if (*skb->data == AX25_P_TEXT) {
                        skb_pull(skb, 1); /* skip PID */
                        ka9qfrag = 0;
                } else {
                        paclen -= 2;    /* Allow for fragment control info */
                        ka9qfrag = 1;
                }

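                /*
                 * fragno is the number of fragments still to follow the
                 * one being built; it is written into each segment header
                 * and counted down, so the final fragment carries zero.
                 */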
                fragno = skb->len / paclen;
                if (skb->len % paclen == 0) fragno--;

                frontlen = skb_headroom(skb);   /* Address space + CTRL */

                while (skb->len > 0) {
                        spin_lock_bh(&ax25_frag_lock);
                        if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                                spin_unlock_bh(&ax25_frag_lock);
                                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                                return;
                        }

                        if (skb->sk != NULL)
                                skb_set_owner_w(skbn, skb->sk);

                        spin_unlock_bh(&ax25_frag_lock);

                        len = (paclen > skb->len) ? skb->len : paclen;

                        if (ka9qfrag == 1) {
                                skb_reserve(skbn, frontlen + 2);
                                skb_set_network_header(skbn,
                                                      skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 2);

                                *p++ = AX25_P_SEGMENT;

                                *p = fragno--;
                                if (first) {
                                        *p |= AX25_SEG_FIRST;
                                        first = 0;
                                }
                        } else {
                                skb_reserve(skbn, frontlen + 1);
                                skb_set_network_header(skbn,
                                                      skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 1);
                                *p = AX25_P_TEXT;
                        }

                        skb_pull(skb, len);
                        skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
                }

                kfree_skb(skb);
        } else {
                skb_queue_tail(&ax25->write_queue, skb);          /* Throw it on the queue */
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_kick(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        /*
         * A DAMA slave is _required_ to work as a normal AX.25 L2 V2
         * station if no DAMA master is available.
         */
        case AX25_PROTO_DAMA_SLAVE:
                if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
                break;
#endif
        }
}

/*
 *  This procedure is passed a buffer descriptor for an I frame. It builds
 *  the rest of the control part of the frame and then writes it out.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
        unsigned char *frame;

        if (skb == NULL)
                return;

        skb_reset_network_header(skb);

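        /*
         * A standard (modulo 8) connection uses a one byte control field
         * carrying N(S), N(R) and the P/F bit; an extended (modulo 128)
         * connection uses a two byte control field with N(S) in the first
         * byte and N(R) plus the P/F bit in the second.
         */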
        if (ax25->modulus == AX25_MODULUS) {
                frame = skb_push(skb, 1);

                *frame = AX25_I;
                *frame |= (poll_bit) ? AX25_PF : 0;
                *frame |= (ax25->vr << 5);
                *frame |= (ax25->vs << 1);
        } else {
                frame = skb_push(skb, 2);

                frame[0] = AX25_I;
                frame[0] |= (ax25->vs << 1);
                frame[1] = (poll_bit) ? AX25_EPF : 0;
                frame[1] |= (ax25->vr << 1);
        }

        ax25_start_idletimer(ax25);

        ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

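/*
 *      Send as many queued I frames as the state and the send window
 *      allow. Each frame is cloned for transmission and the original is
 *      moved to the ack queue so that it can be retransmitted if it is
 *      not acknowledged.
 */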
void ax25_kick(ax25_cb *ax25)
{
        struct sk_buff *skb, *skbn;
        int last = 1;
        unsigned short start, end, next;

        if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
                return;

        if (ax25->condition & AX25_COND_PEER_RX_BUSY)
                return;

        if (skb_peek(&ax25->write_queue) == NULL)
                return;

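        /*
         * start is the first sequence number to send: V(A) if nothing is
         * outstanding, otherwise V(S). end is the first sequence number
         * outside the transmit window.
         */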
        start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
        end   = (ax25->va + ax25->window) % ax25->modulus;

        if (start == end)
                return;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full. Send a poll on the final I frame if
         * the window is filled.
         */

        /*
         * Dequeue the frame and copy it.
         * Check for race with ax25_clear_queues().
         */
        skb  = skb_dequeue(&ax25->write_queue);
        if (!skb)
                return;

        ax25->vs = start;

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        skb_queue_head(&ax25->write_queue, skb);
                        break;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                next = (ax25->vs + 1) % ax25->modulus;
                last = (next == end);

                /*
                 * Transmit the frame copy.
                 * bke 960114: do not set the Poll bit on the last frame
                 * in DAMA mode.
                 */
                switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
                case AX25_PROTO_STD_SIMPLEX:
                case AX25_PROTO_STD_DUPLEX:
                        ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
                        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
                case AX25_PROTO_DAMA_SLAVE:
                        ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
                        break;
#endif
                }

                ax25->vs = next;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&ax25->ack_queue, skb);

        } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

        ax25->condition &= ~AX25_COND_ACK_PENDING;

        if (!ax25_t1timer_running(ax25)) {
                ax25_stop_t3timer(ax25);
                ax25_calculate_t1(ax25);
                ax25_start_t1timer(ax25);
        }
}

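/*
 *      Prepend the AX.25 address field (source, destination and any
 *      digipeater path) to the frame and queue it for transmission on
 *      the device, reallocating headroom first if the buffer is too
 *      small to take the address field.
 */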
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
        struct sk_buff *skbn;
        unsigned char *ptr;
        int headroom;

        if (ax25->ax25_dev == NULL) {
                ax25_disconnect(ax25, ENETUNREACH);
                return;
        }

        headroom = ax25_addr_size(ax25->digipeat);

        if (skb_headroom(skb) < headroom) {
                if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
                        printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
                        kfree_skb(skb);
                        return;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                consume_skb(skb);
                skb = skbn;
        }

        ptr = skb_push(skb, headroom);

        ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

        ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 *      A small shim around dev_queue_xmit() that adds the KISS control
 *      byte and applies any packet forwarding that is in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned char *ptr;

        skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

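        /* Prepend the KISS command byte; 0x00 marks a data frame. */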
        ptr  = skb_push(skb, 1);
        *ptr = 0x00;                    /* KISS */

        dev_queue_xmit(skb);
}

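/*
 *      Process an incoming N(R). If it acknowledges everything that has
 *      been sent, the round trip time is recalculated and T1 gives way
 *      to T3; if it acknowledges only some outstanding frames, T1 is
 *      restarted for the remainder. Returns 1 in either case, 0 if
 *      nothing new was acknowledged.
 */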
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
        if (ax25->vs == nr) {
                ax25_frames_acked(ax25, nr);
                ax25_calculate_rtt(ax25);
                ax25_stop_t1timer(ax25);
                ax25_start_t3timer(ax25);
                return 1;
        } else {
                if (ax25->va != nr) {
                        ax25_frames_acked(ax25, nr);
                        ax25_calculate_t1(ax25);
                        ax25_start_t1timer(ax25);
                        return 1;
                }
        }
        return 0;
}