linux/drivers/net/caif/caif_hsi.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *          Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
                                (((pow)-((x)&((pow)-1)))))
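
/*
 * Worked example (illustrative values): PAD_POW2(5, 4) = 4 - (5 & 3) = 3,
 * so a 5-byte span needs 3 padding bytes to reach the next 4-byte
 * boundary, while PAD_POW2(8, 4) = 0 because 8 is already aligned.
 * "pow" must be a power of two for the mask arithmetic to hold.
 */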

static const struct cfhsi_config hsi_default_config = {

        /* Inactivity timeout on HSI, in jiffies (HZ is one second). */
        .inactivity_timeout = HZ,

        /* Aggregation timeout, in jiffies; zero means no aggregation is done. */
        .aggregation_timeout = 1,

        /*
         * HSI link layer flow-control thresholds.
         * Threshold values for the HSI packet queue. Flow control will be
         * asserted when the number of packets exceeds q_high_mark. It will
         * not be de-asserted before the number of packets drops below
         * q_low_mark.
         * Warning: A high threshold value might increase throughput, but it
         * will at the same time prevent channel prioritization and increase
         * the risk of flooding the modem. The high threshold must be above
         * the low one.
         */
        .q_high_mark = 100,
        .q_low_mark = 50,

        /*
         * HSI padding options.
         * Warning: must be a power of 2 (an & operation is used) and
         * cannot be zero!
         */
        .head_align = 4,
        .tail_align = 4,
};
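
/*
 * Example of the watermark hysteresis with the defaults above: flow is
 * turned OFF towards the CAIF stack once the TX queues hold more than
 * 100 packets, and is only turned back ON when they have drained to 50
 * or fewer, so small oscillations around a single threshold cannot make
 * flow control flap (see cfhsi_xmit() and cfhsi_tx_done()).
 */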

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        /* Schedule power down work queue. */
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
                                           const struct sk_buff *skb,
                                           int direction)
{
        struct caif_payload_info *info;
        int hpad, tpad, len;

        info = (struct caif_payload_info *)&skb->cb;
        hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
        tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
        len = skb->len + hpad + tpad;

        if (direction > 0)
                cfhsi->aggregation_len += len;
        else if (direction < 0)
                cfhsi->aggregation_len -= len;
}

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
        int i;

        if (cfhsi->cfg.aggregation_timeout == 0)
                return true;

        for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
                if (cfhsi->qhead[i].qlen)
                        return true;
        }

        /* TODO: Use aggregation_len instead */
        if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
                return true;

        return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
                skb = skb_dequeue(&cfhsi->qhead[i]);
                if (skb)
                        break;
        }

        return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
        int i, len = 0;

        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
                len += skb_queue_len(&cfhsi->qhead[i]);
        return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
        struct sk_buff *skb;

        for (;;) {
                spin_lock_bh(&cfhsi->lock);
                skb = cfhsi_dequeue(cfhsi);
                if (!skb)
                        break;

                cfhsi->ndev->stats.tx_errors++;
                cfhsi->ndev->stats.tx_dropped++;
                cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                spin_unlock_bh(&cfhsi->lock);
                kfree_skb(skb);
        }
        /* The loop above exits with the lock held once the queue is empty. */
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                mod_timer(&cfhsi->inactivity_timer,
                        jiffies + cfhsi->cfg.inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
        char buffer[32]; /* Any reasonable value */
        size_t fifo_occupancy;
        int ret;

        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        do {
                ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                &fifo_occupancy);
                if (ret) {
                        netdev_warn(cfhsi->ndev,
                                "%s: can't get FIFO occupancy: %d.\n",
                                __func__, ret);
                        break;
                } else if (!fifo_occupancy)
                        /* No more data, exiting normally */
                        break;

                fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
                set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
                                cfhsi->ops);
                if (ret) {
                        clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                        netdev_warn(cfhsi->ndev,
                                "%s: can't read data: %d.\n",
                                __func__, ret);
                        break;
                }

                ret = 5 * HZ;
                ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
                         !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

                if (ret < 0) {
                        netdev_warn(cfhsi->ndev,
                                "%s: can't wait for flush complete: %d.\n",
                                __func__, ret);
                        break;
                } else if (!ret) {
                        ret = -ETIMEDOUT;
                        netdev_warn(cfhsi->ndev,
                                "%s: timeout waiting for flush complete.\n",
                                __func__);
                        break;
                }
        } while (1);

        return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int nfrms = 0;
        int pld_len = 0;
        struct sk_buff *skb;
        u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

        skb = cfhsi_dequeue(cfhsi);
        if (!skb)
                return 0;

        /* Clear offset. */
        desc->offset = 0;

        /* Check if we can embed a CAIF frame. */
        if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
                struct caif_payload_info *info;
                int hpad;
                int tpad;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
                tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

                /* Check if frame still fits with added alignment. */
                if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
                        u8 *pemb = desc->emb_frm;

                        desc->offset = CFHSI_DESC_SHORT_SZ;
                        *pemb = (u8)(hpad - 1);
                        pemb += hpad;

                        /* Update network statistics. */
                        spin_lock_bh(&cfhsi->lock);
                        cfhsi->ndev->stats.tx_packets++;
                        cfhsi->ndev->stats.tx_bytes += skb->len;
                        cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                        spin_unlock_bh(&cfhsi->lock);

                        /* Copy in embedded CAIF frame. */
                        skb_copy_bits(skb, 0, pemb, skb->len);

                        /* Consume the SKB. */
                        consume_skb(skb);
                        skb = NULL;
                }
        }

        /* Create payload CAIF frames. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        while (nfrms < CFHSI_MAX_PKTS) {
                struct caif_payload_info *info;
                int hpad;
                int tpad;

                if (!skb)
                        skb = cfhsi_dequeue(cfhsi);

                if (!skb)
                        break;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
                tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

                /* Fill in CAIF frame length in descriptor. */
                desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

                /* Fill head padding information. */
                *pfrm = (u8)(hpad - 1);
                pfrm += hpad;

                /* Update network statistics. */
                spin_lock_bh(&cfhsi->lock);
                cfhsi->ndev->stats.tx_packets++;
                cfhsi->ndev->stats.tx_bytes += skb->len;
                cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                spin_unlock_bh(&cfhsi->lock);

                /* Copy in CAIF frame. */
                skb_copy_bits(skb, 0, pfrm, skb->len);

                /* Update payload length. */
                pld_len += desc->cffrm_len[nfrms];

                /* Update frame pointer. */
                pfrm += skb->len + tpad;

                /* Consume the SKB. */
                consume_skb(skb);
                skb = NULL;

                /* Update number of frames. */
                nfrms++;
        }

        /* Unused length fields should be zero-filled (according to spec). */
        while (nfrms < CFHSI_MAX_PKTS) {
                desc->cffrm_len[nfrms] = 0x0000;
                nfrms++;
        }

        /* Check if we can piggy-back another descriptor. */
        if (cfhsi_can_send_aggregate(cfhsi))
                desc->header |= CFHSI_PIGGY_DESC;
        else
                desc->header &= ~CFHSI_PIGGY_DESC;

        return CFHSI_DESC_SZ + pld_len;
}
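
/*
 * Sketch of the TX frame assembled by cfhsi_tx_frm() above. The field
 * order comes from struct cfhsi_desc in caif_hsi.h; the byte widths
 * shown here are descriptive, not normative:
 *
 *   +--------+--------+---------------------+----------------------+
 *   | header | offset | cffrm_len[MAX_PKTS] | emb_frm[]            |
 *   +--------+--------+---------------------+----------------------+
 *   <---------------------- CFHSI_DESC_SZ ------------------------>
 *
 * followed by up to CFHSI_MAX_PKTS payload CAIF frames, each laid out
 * as [pad-info byte][head padding][CAIF frame][tail padding], where the
 * pad-info byte stores hpad - 1 so the receiver can skip the padding.
 */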

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
        struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
        int len, res;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        do {
                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
                if (!len) {
                        spin_lock_bh(&cfhsi->lock);
                        if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
                                spin_unlock_bh(&cfhsi->lock);
                                res = -EAGAIN;
                                continue;
                        }
                        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
                        /* Start inactivity timer. */
                        mod_timer(&cfhsi->inactivity_timer,
                                jiffies + cfhsi->cfg.inactivity_timeout);
                        spin_unlock_bh(&cfhsi->lock);
                        break;
                }

                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0))
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
        } while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /*
         * Send flow on if flow off has been previously signalled
         * and the number of packets is below the low water mark.
         */
        spin_lock_bh(&cfhsi->lock);
        if (cfhsi->flow_off_sent &&
                        cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
                        cfhsi->cfdev.flowctrl) {

                cfhsi->flow_off_sent = 0;
                cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
        }

        if (cfhsi_can_send_aggregate(cfhsi)) {
                spin_unlock_bh(&cfhsi->lock);
                cfhsi_start_tx(cfhsi);
        } else {
                mod_timer(&cfhsi->aggregation_timer,
                        jiffies + cfhsi->cfg.aggregation_timeout);
                spin_unlock_bh(&cfhsi->lock);
        }
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;
        cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
                netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Check for embedded CAIF frame. */
        if (desc->offset) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                int len = 0;

                pfrm = ((u8 *)desc) + desc->offset;

                /* Remove offset padding. */
                pfrm += *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pfrm;
                len |= ((*(pfrm + 1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frame. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
                                __func__);
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pfrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We are in a callback handler and
                 * unfortunately we don't know what context we're
                 * running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Check for piggy-backed descriptor. */
        if (desc->header & CFHSI_PIGGY_DESC)
                xfer_sz += CFHSI_DESC_SZ;

        if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
                netdev_err(cfhsi->ndev,
                                "%s: Invalid payload len: %d, ignored.\n",
                        __func__, xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}
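
/*
 * Worked example of the little-endian length decode above (illustrative
 * bytes): if the two bytes following the head padding are 0x2A 0x01,
 * the CAIF frame length is 0x2A | (0x01 << 8) = 298 bytes; adding the
 * 2 FCS bytes gives len = 300, which is what gets copied into the skb.
 */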

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

                pr_err("Invalid descriptor. %x %x\n", desc->header,
                                desc->offset);
                return -EPROTO;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        if (xfer_sz % 4) {
                pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int rx_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        /* Sanity check header and offset. */
        if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
                netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Set frame pointer to start of payload. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        plen = desc->cffrm_len;

        /* Skip already processed frames. */
        while (nfrms < cfhsi->rx_state.nfrms) {
                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Parse payload. */
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                u8 *pcffrm = NULL;
                int len;

                /* CAIF frame starts after head padding. */
                pcffrm = pfrm + *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pcffrm;
                len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frames. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
                                __func__);
                        cfhsi->rx_state.nfrms = nfrms;
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pcffrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We're called in a callback from HSI
                 * and don't know the context we're running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;

                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
        int res;
        int desc_pld_len = 0, rx_len, rx_state;
        struct cfhsi_desc *desc = NULL;
        u8 *rx_ptr, *rx_buf;
        struct cfhsi_desc *piggy_desc = NULL;

        desc = (struct cfhsi_desc *)cfhsi->rx_buf;

        netdev_dbg(cfhsi->ndev, "%s\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Update inactivity timer if pending. */
        spin_lock_bh(&cfhsi->lock);
        mod_timer_pending(&cfhsi->inactivity_timer,
                        jiffies + cfhsi->cfg.inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                desc_pld_len = cfhsi_rx_desc_len(desc);

                if (desc_pld_len < 0)
                        goto out_of_sync;

                rx_buf = cfhsi->rx_buf;
                rx_len = desc_pld_len;
                if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
                        rx_len += CFHSI_DESC_SZ;
                if (desc_pld_len == 0)
                        rx_buf = cfhsi->rx_flip_buf;
        } else {
                rx_buf = cfhsi->rx_flip_buf;

                rx_len = CFHSI_DESC_SZ;
                if (cfhsi->rx_state.pld_len > 0 &&
                                (desc->header & CFHSI_PIGGY_DESC)) {

                        piggy_desc = (struct cfhsi_desc *)
                                (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
                                                cfhsi->rx_state.pld_len);

                        cfhsi->rx_state.piggy_desc = true;

                        /* Extract payload len from piggy-backed descriptor. */
                        desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
                        if (desc_pld_len < 0)
                                goto out_of_sync;

                        if (desc_pld_len > 0) {
                                rx_len = desc_pld_len;
                                if (piggy_desc->header & CFHSI_PIGGY_DESC)
                                        rx_len += CFHSI_DESC_SZ;
                        }

                        /*
                         * Copy needed information from the piggy-backed
                         * descriptor to the descriptor at the start.
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
                }
        }

        if (desc_pld_len) {
                rx_state = CFHSI_RX_STATE_PAYLOAD;
                rx_ptr = rx_buf + CFHSI_DESC_SZ;
        } else {
                rx_state = CFHSI_RX_STATE_DESC;
                rx_ptr = rx_buf;
                rx_len = CFHSI_DESC_SZ;
        }

        /* Initiate next read. */
        if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
                /* Set up new transfer. */
                netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
                                __func__);

                res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
                                cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
                                __func__, res);
                        cfhsi->ndev->stats.rx_errors++;
                        cfhsi->ndev->stats.rx_dropped++;
                }
        }

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                /* Extract payload from descriptor. */
                if (cfhsi_rx_desc(desc, cfhsi) < 0)
                        goto out_of_sync;
        } else {
                /* Extract payload. */
                if (cfhsi_rx_pld(desc, cfhsi) < 0)
                        goto out_of_sync;
                if (piggy_desc) {
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
                        /* Mark no embedded frame after extracting it. */
                        piggy_desc->offset = 0;
                }
        }

        /* Update state info. */
        memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
        cfhsi->rx_state.state = rx_state;
        cfhsi->rx_ptr = rx_ptr;
        cfhsi->rx_len = rx_len;
        cfhsi->rx_state.pld_len = desc_pld_len;
        cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

        if (rx_buf != cfhsi->rx_buf)
                swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
        return;

out_of_sync:
        netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
        print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
                        cfhsi->rx_buf, CFHSI_DESC_SZ);
        schedule_work(&cfhsi->out_of_sync_work);
}
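
/*
 * Note on the flow above: RX is a two-state machine. In
 * CFHSI_RX_STATE_DESC a descriptor (CFHSI_DESC_SZ bytes) is read and
 * its payload length computed; in CFHSI_RX_STATE_PAYLOAD the payload is
 * read, optionally trailed by a piggy-backed descriptor that lets the
 * next payload transfer start without an extra descriptor-only read.
 * The flip buffer is swapped in so the previous transfer can still be
 * parsed while the next one is already in flight.
 */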

static void cfhsi_rx_slowpath(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
                wake_up_interruptible(&cfhsi->flush_fifo_wait);
        else
                cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;
        int res;
        int len;
        long ret;

        cfhsi = container_of(work, struct cfhsi, wake_up_work);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
                /* This happens when a wakeup is requested by
                 * both ends at the same time. */
                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
                return;
        }

        /* Activate wake line. */
        cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

        netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
                __func__);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
                                        test_and_clear_bit(CFHSI_WAKE_UP_ACK,
                                                        &cfhsi->bits), ret);
        if (unlikely(ret < 0)) {
                /* Interrupted by signal. */
                netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
                        __func__, ret);

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
                return;
        } else if (!ret) {
                bool ca_wake = false;
                size_t fifo_occupancy = 0;

                /* Wakeup timeout. */
                netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
                        __func__);

                /* Check the FIFO to see if the modem has sent something. */
                WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                        &fifo_occupancy));

                netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
                                __func__, (unsigned) fifo_occupancy);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
                                                        &ca_wake));

                if (ca_wake) {
                        netdev_err(cfhsi->ndev, "%s: CA wake missed!\n",
                                __func__);

                        /* Clear the CFHSI_WAKE_UP_ACK bit to prevent a race. */
                        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

                        /* Continue execution. */
                        goto wake_ack;
                }

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
                return;
        }
wake_ack:
        netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
                __func__);

        /* Mark the link awake and clear the pending wake-up request. */
        set_bit(CFHSI_AWAKE, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

        /* Resume read operation. */
        netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
        res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

        if (WARN_ON(res < 0))
                netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

        /* Clear power up acknowledgment. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

        spin_lock_bh(&cfhsi->lock);

        /* Resume transmit if the queues are not empty. */
        if (!cfhsi_tx_queue_len(cfhsi)) {
                netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
                        __func__);
                /* Start inactivity timer. */
                mod_timer(&cfhsi->inactivity_timer,
                                jiffies + cfhsi->cfg.inactivity_timeout);
                spin_unlock_bh(&cfhsi->lock);
                return;
        }

        netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
                __func__);

        spin_unlock_bh(&cfhsi->lock);

        /* Create HSI frame. */
        len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

        if (likely(len > 0)) {
                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                netdev_err(cfhsi->ndev,
                                "%s: Failed to create HSI frame: %d.\n",
                                __func__, len);
        }
}

static void cfhsi_wake_down(struct work_struct *work)
{
        long ret;
        struct cfhsi *cfhsi = NULL;
        size_t fifo_occupancy = 0;
        int retry = CFHSI_WAKE_TOUT;

        cfhsi = container_of(work, struct cfhsi, wake_down_work);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Deactivate wake line. */
        cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
                                        test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
                                                        &cfhsi->bits), ret);
        if (ret < 0) {
                /* Interrupted by signal. */
                netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
                        __func__, ret);
                return;
        } else if (!ret) {
                bool ca_wake = true;

                /* Timeout. */
                netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
                                                        &ca_wake));
                if (!ca_wake)
                        netdev_err(cfhsi->ndev, "%s: CA wake missed!\n",
                                __func__);
        }

        /* Check FIFO occupancy. */
        while (retry) {
                WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                                        &fifo_occupancy));

                if (!fifo_occupancy)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
                retry--;
        }

        if (!retry)
                netdev_err(cfhsi->ndev, "%s: FIFO timeout.\n", __func__);

        /* Clear AWAKE condition. */
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Cancel pending RX requests. */
        cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}
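
/*
 * Summary of the wake handshake implemented by cfhsi_wake_up() and
 * cfhsi_wake_down() above (a sketch of the sequencing, not a protocol
 * specification):
 *
 *   host                              modem
 *    |---- wake line activated ------>|   cfhsi_wake_up()
 *    |<--- wake-up ACK ---------------|   cfhsi_wake_up_cb()
 *    |        ... traffic ...         |
 *    |---- wake line deactivated ---->|   cfhsi_wake_down() on
 *    |<--- wake-down ACK -------------|   inactivity timeout
 *
 * Both paths tolerate a missed ACK interrupt by polling the peer wake
 * state (and, on wake-down, the FIFO occupancy) before giving up.
 */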

static void cfhsi_out_of_sync(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

        rtnl_lock();
        dev_close(cfhsi->ndev);
        rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_up_wait);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Schedule wake up work queue if the peer initiates. */
        if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        /* Initiating low power is only permitted by the host (us). */
        set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);

        cfhsi_start_tx(cfhsi);
}
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct cfhsi *cfhsi = NULL;
        int start_xfer = 0;
        int timer_active;
        int prio;

        if (!dev)
                return -EINVAL;

        cfhsi = netdev_priv(dev);

        switch (skb->priority) {
        case TC_PRIO_BESTEFFORT:
        case TC_PRIO_FILLER:
        case TC_PRIO_BULK:
                prio = CFHSI_PRIO_BEBK;
                break;
        case TC_PRIO_INTERACTIVE_BULK:
                prio = CFHSI_PRIO_VI;
                break;
        case TC_PRIO_INTERACTIVE:
                prio = CFHSI_PRIO_VO;
                break;
        case TC_PRIO_CONTROL:
        default:
                prio = CFHSI_PRIO_CTL;
                break;
        }

        spin_lock_bh(&cfhsi->lock);

        /* Update aggregation statistics. */
        cfhsi_update_aggregation_stats(cfhsi, skb, 1);

        /* Queue the SKB. */
        skb_queue_tail(&cfhsi->qhead[prio], skb);

        /* Sanity check; xmit should not be called after unregister_netdev. */
        if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
                spin_unlock_bh(&cfhsi->lock);
                cfhsi_abort_tx(cfhsi);
                return -EINVAL;
        }

        /* Send flow off if the number of packets is above the high water mark. */
        if (!cfhsi->flow_off_sent &&
                cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
                cfhsi->cfdev.flowctrl) {
                cfhsi->flow_off_sent = 1;
                cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
        }

        if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
                cfhsi->tx_state = CFHSI_TX_STATE_XFER;
                start_xfer = 1;
        }

        if (!start_xfer) {
                /* Send the aggregate if it is possible. */
                bool aggregate_ready =
                        cfhsi_can_send_aggregate(cfhsi) &&
                        del_timer(&cfhsi->aggregation_timer) > 0;
                spin_unlock_bh(&cfhsi->lock);
                if (aggregate_ready)
                        cfhsi_start_tx(cfhsi);
                return 0;
        }

        /* Delete inactivity timer if started. */
        timer_active = del_timer_sync(&cfhsi->inactivity_timer);

        spin_unlock_bh(&cfhsi->lock);

        if (timer_active) {
                struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
                int len;
                int res;

                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
                WARN_ON(!len);

                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                /* Schedule the wake up work queue if we initiate. */
                if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                        queue_work(cfhsi->wq, &cfhsi->wake_up_work);
        }

        return 0;
}
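
/*
 * Example of the priority mapping above: a CAIF socket that sets
 * SO_PRIORITY to TC_PRIO_INTERACTIVE (6) has its traffic queued on
 * CFHSI_PRIO_VO, while default best-effort traffic (TC_PRIO_BESTEFFORT,
 * 0) lands on CFHSI_PRIO_BEBK; cfhsi_dequeue() then always drains the
 * lowest-numbered (highest-priority) non-empty queue first.
 */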

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
        int i;
        struct cfhsi *cfhsi = netdev_priv(dev);

        dev->features = 0;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        dev->netdev_ops = &cfhsi_netdevops;
        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
                skb_queue_head_init(&cfhsi->qhead[i]);
        cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
        cfhsi->cfdev.use_frag = false;
        cfhsi->cfdev.use_stx = false;
        cfhsi->cfdev.use_fcs = false;
        cfhsi->ndev = dev;
        cfhsi->cfg = hsi_default_config;
}

static int cfhsi_open(struct net_device *ndev)
{
        struct cfhsi *cfhsi = netdev_priv(ndev);
        int res;

        clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

        /* Initialize state variables. */
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

        /* Set flow info. */
        cfhsi->flow_off_sent = 0;

        /*
         * Allocate a TX buffer with the size of one HSI packet descriptor
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
        if (!cfhsi->tx_buf) {
                res = -ENODEV;
                goto err_alloc_tx;
        }

        /*
         * Allocate an RX buffer with the size of two HSI packet descriptors
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_buf) {
                res = -ENODEV;
                goto err_alloc_rx;
        }

        cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_flip_buf) {
                res = -ENODEV;
                goto err_alloc_rx_flip;
        }

        /* Initialize aggregation timeout. */
        cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

        /* Initialize receive variables. */
        cfhsi->rx_ptr = cfhsi->rx_buf;
        cfhsi->rx_len = CFHSI_DESC_SZ;

        /* Initialize spin locks. */
        spin_lock_init(&cfhsi->lock);

        /* Set up the driver. */
        cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
        cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
        cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
        cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

        /* Initialize the work queues. */
        INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
        INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
        INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

        /* Clear all bit fields. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Create work thread. */
        cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
        if (!cfhsi->wq) {
                netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
                        __func__);
                res = -ENODEV;
                goto err_create_wq;
        }

        /* Initialize wait queues. */
        init_waitqueue_head(&cfhsi->wake_up_wait);
        init_waitqueue_head(&cfhsi->wake_down_wait);
        init_waitqueue_head(&cfhsi->flush_fifo_wait);

        /* Set up the inactivity timer. */
        init_timer(&cfhsi->inactivity_timer);
        cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
        cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
        /* Set up the slowpath RX timer. */
        init_timer(&cfhsi->rx_slowpath_timer);
        cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
        cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
        /* Set up the aggregation timer. */
        init_timer(&cfhsi->aggregation_timer);
        cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
        cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

        /* Activate HSI interface. */
        res = cfhsi->ops->cfhsi_up(cfhsi->ops);
        if (res) {
                netdev_err(cfhsi->ndev,
                        "%s: can't activate HSI interface: %d.\n",
                        __func__, res);
                goto err_activate;
        }

        /* Flush FIFO. */
        res = cfhsi_flush_fifo(cfhsi);
        if (res) {
                netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
                        __func__, res);
                goto err_net_reg;
        }
        return res;

 err_net_reg:
        cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
        destroy_workqueue(cfhsi->wq);
 err_create_wq:
        kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
        kfree(cfhsi->rx_buf);
 err_alloc_rx:
        kfree(cfhsi->tx_buf);
 err_alloc_tx:
        return res;
}

static int cfhsi_close(struct net_device *ndev)
{
        struct cfhsi *cfhsi = netdev_priv(ndev);
        u8 *tx_buf, *rx_buf, *flip_buf;

        /* Going to shut down the driver. */
        set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

        /* Flush workqueue. */
        flush_workqueue(cfhsi->wq);

        /* Delete timers if pending. */
        del_timer_sync(&cfhsi->inactivity_timer);
        del_timer_sync(&cfhsi->rx_slowpath_timer);
        del_timer_sync(&cfhsi->aggregation_timer);

        /* Cancel pending RX request (if any). */
        cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

        /* Destroy workqueue. */
        destroy_workqueue(cfhsi->wq);

        /* Store buffers: they will be freed later. */
        tx_buf = cfhsi->tx_buf;
        rx_buf = cfhsi->rx_buf;
        flip_buf = cfhsi->rx_flip_buf;
        /* Flush transmit queues. */
        cfhsi_abort_tx(cfhsi);

        /* Deactivate interface. */
        cfhsi->ops->cfhsi_down(cfhsi->ops);

        /* Free buffers. */
        kfree(tx_buf);
        kfree(rx_buf);
        kfree(flip_buf);
        return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
        struct cfhsi *cfhsi = netdev_priv(dev);

        ASSERT_RTNL();
        /* Drop the reference taken with symbol_get() in caif_hsi_newlink(). */
        symbol_put(cfhsi_get_ops);
        list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
        .ndo_uninit = cfhsi_uninit,
        .ndo_open = cfhsi_open,
        .ndo_stop = cfhsi_close,
        .ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
        int i;

        if (!data) {
                pr_debug("no params data found\n");
                return;
        }

        i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
        /*
         * Inactivity timeout in milliseconds. The lowest possible value is 1,
         * and the highest possible is NEXT_TIMER_MAX_DELTA.
         */
        if (data[i]) {
                u32 inactivity_timeout = nla_get_u32(data[i]);

                /* Pre-calculate inactivity timeout (in jiffies). */
                cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
                if (cfhsi->cfg.inactivity_timeout == 0)
                        cfhsi->cfg.inactivity_timeout = 1;
                else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
                        cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
        }

        i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
        if (data[i])
                cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_HEAD_ALIGN;
        if (data[i])
                cfhsi->cfg.head_align = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_TAIL_ALIGN;
        if (data[i])
                cfhsi->cfg.tail_align = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
        if (data[i])
                cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
        if (data[i])
                cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}
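
/*
 * Worked example of the ms-to-jiffies conversion above (illustrative
 * values): with HZ == 250, a requested inactivity timeout of 100 ms
 * becomes 100 * 250 / 1000 = 25 jiffies; a 1 ms request rounds down to
 * 0 and is clamped up to 1 jiffy, and anything above
 * NEXT_TIMER_MAX_DELTA is clamped down to it.
 */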

static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
                               struct nlattr *data[])
{
        cfhsi_netlink_parms(data, netdev_priv(dev));
        netdev_state_change(dev);
        return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
        [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

static size_t caif_hsi_get_size(const struct net_device *dev)
{
        int i;
        size_t s = 0;

        for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
                s += nla_total_size(caif_hsi_policy[i].len);
        return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct cfhsi *cfhsi = netdev_priv(dev);

        if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
                        cfhsi->cfg.inactivity_timeout) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
                        cfhsi->cfg.aggregation_timeout) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
                        cfhsi->cfg.head_align) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
                        cfhsi->cfg.tail_align) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
                        cfhsi->cfg.q_high_mark) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
                        cfhsi->cfg.q_low_mark))
                return -EMSGSIZE;

        return 0;
}

static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
                            struct nlattr *tb[], struct nlattr *data[])
{
        struct cfhsi *cfhsi = NULL;
        struct cfhsi_ops *(*get_ops)(void);

        ASSERT_RTNL();

        cfhsi = netdev_priv(dev);
        cfhsi_netlink_parms(data, cfhsi);
        dev_net_set(cfhsi->ndev, src_net);

        get_ops = symbol_get(cfhsi_get_ops);
        if (!get_ops) {
                pr_err("%s: failed to get the cfhsi_ops\n", __func__);
                return -ENODEV;
        }

        /* Assign the HSI device. */
        cfhsi->ops = (*get_ops)();
        if (!cfhsi->ops) {
                pr_err("%s: failed to get the cfhsi_ops\n", __func__);
                goto err;
        }

        /* Assign the driver to this HSI device. */
        cfhsi->ops->cb_ops = &cfhsi->cb_ops;
        if (register_netdevice(dev)) {
                pr_warn("%s: caif_hsi device registration failed\n", __func__);
                goto err;
        }
        /* Add CAIF HSI device to list. */
        list_add_tail(&cfhsi->list, &cfhsi_list);

        return 0;
err:
        symbol_put(cfhsi_get_ops);
        return -ENODEV;
}

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
        .kind           = "cfhsi",
        .priv_size      = sizeof(struct cfhsi),
        .setup          = cfhsi_setup,
        .maxtype        = __IFLA_CAIF_HSI_MAX,
        .policy         = caif_hsi_policy,
        .newlink        = caif_hsi_newlink,
        .changelink     = caif_hsi_changelink,
        .get_size       = caif_hsi_get_size,
        .fill_info      = caif_hsi_fill_info,
};
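
/*
 * Usage sketch, assuming an iproute2 build that knows the "cfhsi" rtnl
 * kind (the exact CLI syntax is illustrative, not confirmed):
 *
 *   ip link add dev cfhsi0 type cfhsi
 *   ip link set dev cfhsi0 up
 *
 * Parameter changes go through caif_hsi_changelink() and are applied by
 * cfhsi_netlink_parms() without tearing the interface down.
 */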

static void __exit cfhsi_exit_module(void)
{
        struct list_head *list_node;
        struct list_head *n;
        struct cfhsi *cfhsi;

        rtnl_link_unregister(&caif_hsi_link_ops);

        rtnl_lock();
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);
                /* The RTNL lock is already held here. */
                unregister_netdevice(cfhsi->ndev);
        }
        rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
        return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);