/* linux/net/caif/cfserl.c — CAIF serial-link layer (STX framing / reassembly) */
   1/*
   2 * Copyright (C) ST-Ericsson AB 2010
   3 * Author:      Sjur Brendeland/sjur.brandeland@stericsson.com
   4 * License terms: GNU General Public License (GPL) version 2
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
   8
   9#include <linux/stddef.h>
  10#include <linux/spinlock.h>
  11#include <linux/slab.h>
  12#include <net/caif/caif_layer.h>
  13#include <net/caif/cfpkt.h>
  14#include <net/caif/cfserl.h>
  15
  16#define container_obj(layr) ((struct cfserl *) layr)
  17
  18#define CFSERL_STX 0x02
  19#define SERIAL_MINIUM_PACKET_SIZE 4
  20#define SERIAL_MAX_FRAMESIZE 4096
struct cfserl {
	/* Must be the first member: container_obj() casts a cflayer
	 * pointer straight to a cfserl pointer (checked by the
	 * caif_assert(offsetof(...) == 0) in cfserl_create()). */
	struct cflayer layer;
	/* Leftover bytes of a partially received frame, carried over
	 * until the next cfserl_receive() call (NULL when none). */
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	/* True when frames on the wire are delimited by a leading
	 * STX byte (CFSERL_STX, 0x02). */
	bool usestx;
};
  28
  29static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
  30static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
  31static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
  32                                int phyid);
  33
  34struct cflayer *cfserl_create(int type, int instance, bool use_stx)
  35{
  36        struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
  37        if (!this) {
  38                pr_warn("Out of memory\n");
  39                return NULL;
  40        }
  41        caif_assert(offsetof(struct cfserl, layer) == 0);
  42        memset(this, 0, sizeof(struct cfserl));
  43        this->layer.receive = cfserl_receive;
  44        this->layer.transmit = cfserl_transmit;
  45        this->layer.ctrlcmd = cfserl_ctrlcmd;
  46        this->layer.type = type;
  47        this->usestx = use_stx;
  48        spin_lock_init(&this->sync);
  49        snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
  50        return &this->layer;
  51}
  52
/*
 * Receive entry point: reassemble CAIF frames from a serial byte stream.
 *
 * Incoming data may contain a partial frame, exactly one frame, or
 * several frames back to back.  Any partial tail is parked in
 * layr->incomplete_frm until more data arrives.  With usestx, each
 * frame is preceded by an STX byte and the code resynchronises by
 * scanning for the next STX after a framing error.
 *
 * Returns 0 on success (including "need more data"), -ENOMEM if the
 * carried-over fragment could not be extended, -EPROTO on an
 * unrecoverable framing error.  Takes ownership of @newpkt.
 */
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	/* Glue the new data onto any fragment left from the last call.
	 * NOTE(review): expectlen is still 0 at this point; presumably
	 * cfpkt_append() treats it as a "grow as needed" hint — confirm
	 * against cfpkt.h. */
	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	/* From here on, pkt owns all buffered data; clear the stash so
	 * error paths don't double-handle it. */
	layr->incomplete_frm = NULL;

	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				/* Out of sync: discard bytes until the
				 * next STX delimiter. */
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				/* No STX found in the buffered data:
				 * drop everything and report error. */
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 *  pkt_len is the accumulated length of the packet data
		 *  we have received so far.
		 *  Exit if frame doesn't hold length.
		 */

		if (pkt_len < 2) {
			/* Re-add the consumed STX so the fragment is a
			 * valid frame prefix when more data arrives. */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 *  Find length of frame.
		 *  expectlen is the length we need for a full frame.
		 */
		/* Length field is little-endian on the wire; +2 covers
		 * the length field itself. */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;
		/*
		 * Frame error handling
		 */
		if (expectlen < SERIAL_MINIUM_PACKET_SIZE
		    || expectlen > SERIAL_MAX_FRAMESIZE) {
			/* Without STX there is no way to resynchronise:
			 * drop the data and give up. */
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			/* With STX: loop back and scan for the next
			 * delimiter in the remaining bytes. */
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards.*/
		/* Lock is dropped across the upcall to avoid holding it
		 * through the upper layers, then retaken for the next
		 * loop iteration. */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				/* Rejoin the tail so the resync scan at
				 * the loop top sees all remaining data. */
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		/* Process any remaining frames in the tail. */
		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
 178
 179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
 180{
 181        struct cfserl *layr = container_obj(layer);
 182        int ret;
 183        u8 tmp8 = CFSERL_STX;
 184        if (layr->usestx)
 185                cfpkt_add_head(newpkt, &tmp8, 1);
 186        ret = layer->dn->transmit(layer->dn, newpkt);
 187        if (ret < 0)
 188                cfpkt_extr_head(newpkt, &tmp8, 1);
 189
 190        return ret;
 191}
 192
 193static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 194                                int phyid)
 195{
 196        layr->up->ctrlcmd(layr->up, ctrl, phyid);
 197}
 198