linux/drivers/net/caif/caif_hsi.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:  Daniel Martensson
 *          Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
                                (((pow)-((x)&((pow)-1)))))

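/*
 * Worked example (illustrative only): with an alignment of 4,
 * PAD_POW2(5, 4) = 3 since three padding bytes take 5 up to the next
 * 4-byte boundary, while PAD_POW2(8, 4) = 0 since 8 is already aligned.
 */
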
static const struct cfhsi_config hsi_default_config = {

        /* Inactivity timeout on HSI, in jiffies (configured in ms via netlink). */
        .inactivity_timeout = HZ,

        /* Aggregation timeout (ms); zero means no aggregation is done. */
        .aggregation_timeout = 1,

        /*
         * HSI link layer flow-control thresholds.
         * Threshold values for the HSI packet queue. Flow control is
         * asserted when the number of packets exceeds q_high_mark and is
         * not de-asserted until the number of packets drops below
         * q_low_mark.
         * Warning: a high threshold value may increase throughput, but it
         * also prevents channel prioritization and increases the risk of
         * flooding the modem. The high threshold must be above the low one.
         */
        .q_high_mark = 100,
        .q_low_mark = 50,

        /*
         * HSI padding options.
         * Warning: each must be a power of 2 (an & operation is used) and
         * cannot be zero!
         */
        .head_align = 4,
        .tail_align = 4,
};
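
/*
 * Flow-control example with the defaults above: flow is turned off in
 * cfhsi_xmit() once more than 100 packets are queued, and is turned back
 * on in cfhsi_tx_done() only when the queues have drained to 50 or
 * fewer, giving hysteresis between the two watermarks.
 */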

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        /* Schedule power down work queue. */
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
                                           const struct sk_buff *skb,
                                           int direction)
{
        struct caif_payload_info *info;
        int hpad, tpad, len;

        info = (struct caif_payload_info *)&skb->cb;
        hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
        tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
        len = skb->len + hpad + tpad;

        if (direction > 0)
                cfhsi->aggregation_len += len;
        else if (direction < 0)
                cfhsi->aggregation_len -= len;
}

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
        int i;

        if (cfhsi->cfg.aggregation_timeout == 0)
                return true;

        for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
                if (cfhsi->qhead[i].qlen)
                        return true;
        }

        /* TODO: Use aggregation_len instead */
        if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
                return true;

        return false;
}
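
/*
 * Aggregation policy illustrated: with aggregation_timeout == 0 every
 * frame is sent immediately. Otherwise best-effort/background traffic
 * is held back until higher-priority traffic is queued, CFHSI_MAX_PKTS
 * BE/BK packets have accumulated, or the aggregation timer fires
 * (cfhsi_aggregation_tout()).
 */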

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
                skb = skb_dequeue(&cfhsi->qhead[i]);
                if (skb)
                        break;
        }

        return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
        int i, len = 0;

        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
                len += skb_queue_len(&cfhsi->qhead[i]);
        return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
        struct sk_buff *skb;

        for (;;) {
                spin_lock_bh(&cfhsi->lock);
                skb = cfhsi_dequeue(cfhsi);
                if (!skb)
                        break;

                cfhsi->ndev->stats.tx_errors++;
                cfhsi->ndev->stats.tx_dropped++;
                cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                spin_unlock_bh(&cfhsi->lock);
                kfree_skb(skb);
        }
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                mod_timer(&cfhsi->inactivity_timer,
                        jiffies + cfhsi->cfg.inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
        char buffer[32]; /* Any reasonable value */
        size_t fifo_occupancy;
        int ret;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        do {
                ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                &fifo_occupancy);
                if (ret) {
                        netdev_warn(cfhsi->ndev,
                                "%s: can't get FIFO occupancy: %d.\n",
                                __func__, ret);
                        break;
                } else if (!fifo_occupancy) {
                        /* No more data, exiting normally */
                        break;
                }

                fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
                set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
                                cfhsi->ops);
                if (ret) {
                        clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
                        netdev_warn(cfhsi->ndev,
                                "%s: can't read data: %d.\n",
                                __func__, ret);
                        break;
                }

                ret = 5 * HZ;
                ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
                         !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

                if (ret < 0) {
                        netdev_warn(cfhsi->ndev,
                                "%s: can't wait for flush complete: %d.\n",
                                __func__, ret);
                        break;
                } else if (!ret) {
                        ret = -ETIMEDOUT;
                        netdev_warn(cfhsi->ndev,
                                "%s: timeout waiting for flush complete.\n",
                                __func__);
                        break;
                }
        } while (1);

        return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int nfrms = 0;
        int pld_len = 0;
        struct sk_buff *skb;
        u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

        skb = cfhsi_dequeue(cfhsi);
        if (!skb)
                return 0;

        /* Clear offset. */
        desc->offset = 0;

        /* Check if we can embed a CAIF frame. */
        if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
                struct caif_payload_info *info;
                int hpad;
                int tpad;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
                tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

                /* Check if frame still fits with added alignment. */
                if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
                        u8 *pemb = desc->emb_frm;
                        desc->offset = CFHSI_DESC_SHORT_SZ;
                        *pemb = (u8)(hpad - 1);
                        pemb += hpad;

                        /* Update network statistics. */
                        spin_lock_bh(&cfhsi->lock);
                        cfhsi->ndev->stats.tx_packets++;
                        cfhsi->ndev->stats.tx_bytes += skb->len;
                        cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                        spin_unlock_bh(&cfhsi->lock);

                        /* Copy in embedded CAIF frame. */
                        skb_copy_bits(skb, 0, pemb, skb->len);

                        /* Consume the SKB */
                        consume_skb(skb);
                        skb = NULL;
                }
        }

        /* Create payload CAIF frames. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        while (nfrms < CFHSI_MAX_PKTS) {
                struct caif_payload_info *info;
                int hpad;
                int tpad;

                if (!skb)
                        skb = cfhsi_dequeue(cfhsi);

                if (!skb)
                        break;

                /* Calculate needed head alignment and tail alignment. */
                info = (struct caif_payload_info *)&skb->cb;

                hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
                tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

                /* Fill in CAIF frame length in descriptor. */
                desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

                /* Fill head padding information. */
                *pfrm = (u8)(hpad - 1);
                pfrm += hpad;

                /* Update network statistics. */
                spin_lock_bh(&cfhsi->lock);
                cfhsi->ndev->stats.tx_packets++;
                cfhsi->ndev->stats.tx_bytes += skb->len;
                cfhsi_update_aggregation_stats(cfhsi, skb, -1);
                spin_unlock_bh(&cfhsi->lock);

                /* Copy in CAIF frame. */
                skb_copy_bits(skb, 0, pfrm, skb->len);

                /* Update payload length. */
                pld_len += desc->cffrm_len[nfrms];

                /* Update frame pointer. */
                pfrm += skb->len + tpad;

                /* Consume the SKB */
                consume_skb(skb);
                skb = NULL;

                /* Update number of frames. */
                nfrms++;
        }

        /* Unused length fields should be zero-filled (according to SPEC). */
        while (nfrms < CFHSI_MAX_PKTS) {
                desc->cffrm_len[nfrms] = 0x0000;
                nfrms++;
        }

        /* Check if we can piggy-back another descriptor. */
        if (cfhsi_can_send_aggregate(cfhsi))
                desc->header |= CFHSI_PIGGY_DESC;
        else
                desc->header &= ~CFHSI_PIGGY_DESC;

        return CFHSI_DESC_SZ + pld_len;
}
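
/*
 * Sketch of the HSI frame assembled by cfhsi_tx_frm() above (field names
 * from struct cfhsi_desc; widths symbolic, not authoritative):
 *
 *   | header | offset | cffrm_len[0..CFHSI_MAX_PKTS-1] | emb_frm |
 *   | payload frm 0: pad-len byte, head pad, CAIF frame, tail pad | ...
 *
 * A non-zero offset means emb_frm holds an embedded CAIF frame starting
 * at that offset; each cffrm_len[i] is the padded length of payload
 * frame i, and unused entries are zero-filled.
 */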

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
        struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
        int len, res;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        do {
                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
                if (!len) {
                        spin_lock_bh(&cfhsi->lock);
                        if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
                                spin_unlock_bh(&cfhsi->lock);
                                res = -EAGAIN;
                                continue;
                        }
                        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
                        /* Start inactivity timer. */
                        mod_timer(&cfhsi->inactivity_timer,
                                jiffies + cfhsi->cfg.inactivity_timeout);
                        spin_unlock_bh(&cfhsi->lock);
                        break;
                }

                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0))
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
        } while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /*
         * Send flow on if flow off has previously been signalled
         * and the number of packets is at or below the low water mark.
         */
        spin_lock_bh(&cfhsi->lock);
        if (cfhsi->flow_off_sent &&
                        cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
                        cfhsi->cfdev.flowctrl) {

                cfhsi->flow_off_sent = 0;
                cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
        }

        if (cfhsi_can_send_aggregate(cfhsi)) {
                spin_unlock_bh(&cfhsi->lock);
                cfhsi_start_tx(cfhsi);
        } else {
                mod_timer(&cfhsi->aggregation_timer,
                        jiffies + cfhsi->cfg.aggregation_timeout);
                spin_unlock_bh(&cfhsi->lock);
        }
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;
        cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
                netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Check for embedded CAIF frame. */
        if (desc->offset) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                int len = 0;
                pfrm = ((u8 *)desc) + desc->offset;

                /* Remove offset padding. */
                pfrm += *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pfrm;
                len |= ((*(pfrm+1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frame. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
                                __func__);
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pfrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We are in a callback handler and
                 * unfortunately we don't know what context we're
                 * running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Check for piggy-backed descriptor. */
        if (desc->header & CFHSI_PIGGY_DESC)
                xfer_sz += CFHSI_DESC_SZ;

        if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
                netdev_err(cfhsi->ndev,
                                "%s: Invalid payload len: %d, ignored.\n",
                                __func__, xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}
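
/*
 * Worked example (illustrative): if the frame area starts with the pad
 * byte 0x03 followed by three padding bytes, the CAIF frame begins four
 * bytes in; leading length bytes 0x10 0x00 (little endian) then give
 * len = 0x0010 + 2 = 18 bytes including the two FCS bytes.
 */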

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
        int xfer_sz = 0;
        int nfrms = 0;
        u16 *plen;

        if ((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

                pr_err("Invalid descriptor. %x %x\n", desc->header,
                                desc->offset);
                return -EPROTO;
        }

        /* Calculate transfer length. */
        plen = desc->cffrm_len;
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                xfer_sz += *plen;
                plen++;
                nfrms++;
        }

        if (xfer_sz % 4) {
                pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
                return -EPROTO;
        }
        return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
        int rx_sz = 0;
        int nfrms = 0;
        u16 *plen = NULL;
        u8 *pfrm = NULL;

        /* Sanity check header and offset. */
        if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
                        (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
                netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
                        __func__);
                return -EPROTO;
        }

        /* Set frame pointer to start of payload. */
        pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
        plen = desc->cffrm_len;

        /* Skip already processed frames. */
        while (nfrms < cfhsi->rx_state.nfrms) {
                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        /* Parse payload. */
        while (nfrms < CFHSI_MAX_PKTS && *plen) {
                struct sk_buff *skb;
                u8 *dst = NULL;
                u8 *pcffrm = NULL;
                int len;

                /* CAIF frame starts after head padding. */
                pcffrm = pfrm + *pfrm + 1;

                /* Read length of CAIF frame (little endian). */
                len = *pcffrm;
                len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
                len += 2;       /* Add FCS fields. */

                /* Sanity check length of CAIF frame. */
                if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
                        netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
                                __func__);
                        return -EPROTO;
                }

                /* Allocate SKB (OK even in IRQ context). */
                skb = alloc_skb(len + 1, GFP_ATOMIC);
                if (!skb) {
                        netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
                                __func__);
                        cfhsi->rx_state.nfrms = nfrms;
                        return -ENOMEM;
                }
                caif_assert(skb != NULL);

                dst = skb_put(skb, len);
                memcpy(dst, pcffrm, len);

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
                skb->dev = cfhsi->ndev;

                /*
                 * We're called in callback from HSI
                 * and don't know the context we're running in.
                 */
                if (in_interrupt())
                        netif_rx(skb);
                else
                        netif_rx_ni(skb);

                /* Update network statistics. */
                cfhsi->ndev->stats.rx_packets++;
                cfhsi->ndev->stats.rx_bytes += len;

                pfrm += *plen;
                rx_sz += *plen;
                plen++;
                nfrms++;
        }

        return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
        int res;
        int desc_pld_len = 0, rx_len, rx_state;
        struct cfhsi_desc *desc = NULL;
        u8 *rx_ptr, *rx_buf;
        struct cfhsi_desc *piggy_desc = NULL;

        desc = (struct cfhsi_desc *)cfhsi->rx_buf;

        netdev_dbg(cfhsi->ndev, "%s\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Update inactivity timer if pending. */
        spin_lock_bh(&cfhsi->lock);
        mod_timer_pending(&cfhsi->inactivity_timer,
                        jiffies + cfhsi->cfg.inactivity_timeout);
        spin_unlock_bh(&cfhsi->lock);

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                desc_pld_len = cfhsi_rx_desc_len(desc);

                if (desc_pld_len < 0)
                        goto out_of_sync;

                rx_buf = cfhsi->rx_buf;
                rx_len = desc_pld_len;
                if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
                        rx_len += CFHSI_DESC_SZ;
                if (desc_pld_len == 0)
                        rx_buf = cfhsi->rx_flip_buf;
        } else {
                rx_buf = cfhsi->rx_flip_buf;

                rx_len = CFHSI_DESC_SZ;
                if (cfhsi->rx_state.pld_len > 0 &&
                                (desc->header & CFHSI_PIGGY_DESC)) {

                        piggy_desc = (struct cfhsi_desc *)
                                (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
                                                cfhsi->rx_state.pld_len);

                        cfhsi->rx_state.piggy_desc = true;

                        /* Extract payload len from piggy-backed descriptor. */
                        desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
                        if (desc_pld_len < 0)
                                goto out_of_sync;

                        if (desc_pld_len > 0) {
                                rx_len = desc_pld_len;
                                if (piggy_desc->header & CFHSI_PIGGY_DESC)
                                        rx_len += CFHSI_DESC_SZ;
                        }

                        /*
                         * Copy needed information from the piggy-backed
                         * descriptor to the descriptor at the start of the
                         * RX buffer.
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
                }
        }

        if (desc_pld_len) {
                rx_state = CFHSI_RX_STATE_PAYLOAD;
                rx_ptr = rx_buf + CFHSI_DESC_SZ;
        } else {
                rx_state = CFHSI_RX_STATE_DESC;
                rx_ptr = rx_buf;
                rx_len = CFHSI_DESC_SZ;
        }

        /* Initiate next read */
        if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
                /* Set up new transfer. */
                netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);

                res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
                                cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
                                __func__, res);
                        cfhsi->ndev->stats.rx_errors++;
                        cfhsi->ndev->stats.rx_dropped++;
                }
        }

        if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
                /* Extract payload from descriptor */
                if (cfhsi_rx_desc(desc, cfhsi) < 0)
                        goto out_of_sync;
        } else {
                /* Extract payload */
                if (cfhsi_rx_pld(desc, cfhsi) < 0)
                        goto out_of_sync;
                if (piggy_desc) {
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
                        /* Mark no embedded frame after extracting it */
                        piggy_desc->offset = 0;
                }
        }

        /* Update state info */
        memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
        cfhsi->rx_state.state = rx_state;
        cfhsi->rx_ptr = rx_ptr;
        cfhsi->rx_len = rx_len;
        cfhsi->rx_state.pld_len = desc_pld_len;
        cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

        if (rx_buf != cfhsi->rx_buf)
                swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
        return;

out_of_sync:
        netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
        print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
                        cfhsi->rx_buf, CFHSI_DESC_SZ);
        schedule_work(&cfhsi->out_of_sync_work);
}
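
/*
 * Note on the two RX buffers: while the payload just received into one
 * buffer is parsed above, the next descriptor is read into the flip
 * buffer, and the two buffers are swapped once parsing completes.
 */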

static void cfhsi_rx_slowpath(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
                wake_up_interruptible(&cfhsi->flush_fifo_wait);
        else
                cfhsi_rx_done(cfhsi);
}

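/*
 * Power-management handshake as implemented below: the host asserts its
 * wake line via ops->cfhsi_wake_up() and waits up to CFHSI_WAKE_TOUT for
 * the peer's acknowledgment (CFHSI_WAKE_UP_ACK, set by cfhsi_wake_up_cb());
 * cfhsi_wake_down() mirrors the sequence for power-down.
 */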
static void cfhsi_wake_up(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;
        int res;
        int len;
        long ret;

        cfhsi = container_of(work, struct cfhsi, wake_up_work);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
                /* It happens when wakeup is requested by
                 * both ends at the same time. */
                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
                return;
        }

        /* Activate wake line. */
        cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

        netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", __func__);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
                                        test_and_clear_bit(CFHSI_WAKE_UP_ACK,
                                                        &cfhsi->bits), ret);
        if (unlikely(ret < 0)) {
                /* Interrupted by signal. */
                netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
                        __func__, ret);

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
                return;
        } else if (!ret) {
                bool ca_wake = false;
                size_t fifo_occupancy = 0;

                /* Wakeup timeout */
                netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", __func__);

                /* Check the FIFO to see if the modem has sent something. */
                WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                        &fifo_occupancy));

                netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
                                __func__, (unsigned) fifo_occupancy);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
                                                        &ca_wake));

                if (ca_wake) {
                        netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
                                __func__);

                        /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
                        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

                        /* Continue execution. */
                        goto wake_ack;
                }

                clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
                cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
                return;
        }
wake_ack:
        netdev_dbg(cfhsi->ndev, "%s: Woken.\n", __func__);

        /* Set the power-up bit and clear the pending wake-up request. */
        set_bit(CFHSI_AWAKE, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

        /* Resume read operation. */
        netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
        res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

        if (WARN_ON(res < 0))
                netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

        /* Clear power up acknowledgment. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

        spin_lock_bh(&cfhsi->lock);

        /* Resume transmit if queues are not empty. */
        if (!cfhsi_tx_queue_len(cfhsi)) {
                netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
                        __func__);
                /* Start inactivity timer. */
                mod_timer(&cfhsi->inactivity_timer,
                                jiffies + cfhsi->cfg.inactivity_timeout);
                spin_unlock_bh(&cfhsi->lock);
                return;
        }

        netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", __func__);

        spin_unlock_bh(&cfhsi->lock);

        /* Create HSI frame. */
        len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

        if (likely(len > 0)) {
                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                netdev_err(cfhsi->ndev,
                                "%s: Failed to create HSI frame: %d.\n",
                                __func__, len);
        }
}

static void cfhsi_wake_down(struct work_struct *work)
{
        long ret;
        struct cfhsi *cfhsi = NULL;
        size_t fifo_occupancy = 0;
        int retry = CFHSI_WAKE_TOUT;

        cfhsi = container_of(work, struct cfhsi, wake_down_work);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Deactivate wake line. */
        cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

        /* Wait for acknowledge. */
        ret = CFHSI_WAKE_TOUT;
        ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
                                        test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
                                                        &cfhsi->bits), ret);
        if (ret < 0) {
                /* Interrupted by signal. */
                netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
                        __func__, ret);
                return;
        } else if (!ret) {
                bool ca_wake = true;

                /* Timeout */
                netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

                /* Check if we missed the interrupt. */
                WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
                                                        &ca_wake));
                if (!ca_wake)
                        netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
                                __func__);
        }

        /* Check FIFO occupancy. */
        while (retry) {
                WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
                                                        &fifo_occupancy));

                if (!fifo_occupancy)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
                retry--;
        }

        if (!retry)
                netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

        /* Clear AWAKE condition. */
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Cancel pending RX requests. */
        cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

        rtnl_lock();
        dev_close(cfhsi->ndev);
        rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_up_wait);

        if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
                return;

        /* Schedule wake up work queue if the peer initiates. */
        if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
        struct cfhsi *cfhsi = NULL;

        cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        /* Initiating low power is only permitted by the host (us). */
        set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
        struct cfhsi *cfhsi = (struct cfhsi *)arg;

        netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

        cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct cfhsi *cfhsi = NULL;
        int start_xfer = 0;
        int timer_active;
        int prio;

        if (!dev)
                return -EINVAL;

        cfhsi = netdev_priv(dev);

        switch (skb->priority) {
        case TC_PRIO_BESTEFFORT:
        case TC_PRIO_FILLER:
        case TC_PRIO_BULK:
                prio = CFHSI_PRIO_BEBK;
                break;
        case TC_PRIO_INTERACTIVE_BULK:
                prio = CFHSI_PRIO_VI;
                break;
        case TC_PRIO_INTERACTIVE:
                prio = CFHSI_PRIO_VO;
                break;
        case TC_PRIO_CONTROL:
        default:
                prio = CFHSI_PRIO_CTL;
                break;
        }

        spin_lock_bh(&cfhsi->lock);

        /* Update aggregation statistics. */
        cfhsi_update_aggregation_stats(cfhsi, skb, 1);

        /* Queue the SKB */
        skb_queue_tail(&cfhsi->qhead[prio], skb);

        /* Sanity check; xmit should not be called after unregister_netdev */
        if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
                spin_unlock_bh(&cfhsi->lock);
                cfhsi_abort_tx(cfhsi);
                return -EINVAL;
        }

        /* Send flow off if number of packets is above high water mark. */
        if (!cfhsi->flow_off_sent &&
                cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
                cfhsi->cfdev.flowctrl) {
                cfhsi->flow_off_sent = 1;
                cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
        }

        if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
                cfhsi->tx_state = CFHSI_TX_STATE_XFER;
                start_xfer = 1;
        }

        if (!start_xfer) {
                /* Send aggregate if it is possible */
                bool aggregate_ready =
                        cfhsi_can_send_aggregate(cfhsi) &&
                        del_timer(&cfhsi->aggregation_timer) > 0;
                spin_unlock_bh(&cfhsi->lock);
                if (aggregate_ready)
                        cfhsi_start_tx(cfhsi);
                return 0;
        }

        /* Delete inactivity timer if started. */
        timer_active = del_timer_sync(&cfhsi->inactivity_timer);

        spin_unlock_bh(&cfhsi->lock);

        if (timer_active) {
                struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
                int len;
                int res;

                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
                WARN_ON(!len);

                /* Set up new transfer. */
                res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
                if (WARN_ON(res < 0)) {
                        netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
                                __func__, res);
                        cfhsi_abort_tx(cfhsi);
                }
        } else {
                /* Schedule wake up work queue if we initiate. */
                if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
                        queue_work(cfhsi->wq, &cfhsi->wake_up_work);
        }

        return 0;
}

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
        int i;
        struct cfhsi *cfhsi = netdev_priv(dev);
        dev->features = 0;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        dev->netdev_ops = &cfhsi_netdevops;
        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
                skb_queue_head_init(&cfhsi->qhead[i]);
        cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
        cfhsi->cfdev.use_frag = false;
        cfhsi->cfdev.use_stx = false;
        cfhsi->cfdev.use_fcs = false;
        cfhsi->ndev = dev;
        cfhsi->cfg = hsi_default_config;
}

static int cfhsi_open(struct net_device *ndev)
{
        struct cfhsi *cfhsi = netdev_priv(ndev);
        int res;

        clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

        /* Initialize state variables. */
        cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
        cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

        /* Set flow info */
        cfhsi->flow_off_sent = 0;

        /*
         * Allocate a TX buffer with the size of an HSI packet descriptor
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
        if (!cfhsi->tx_buf) {
                res = -ENOMEM;
                goto err_alloc_tx;
        }

        /*
         * Allocate an RX buffer with the size of two HSI packet descriptors
         * and the necessary room for CAIF payload frames.
         */
        cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_buf) {
                res = -ENOMEM;
                goto err_alloc_rx;
        }

        cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
        if (!cfhsi->rx_flip_buf) {
                res = -ENOMEM;
                goto err_alloc_rx_flip;
        }

        /* Initialize aggregation timeout */
        cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

        /* Initialize receive variables. */
        cfhsi->rx_ptr = cfhsi->rx_buf;
        cfhsi->rx_len = CFHSI_DESC_SZ;

        /* Initialize spin locks. */
        spin_lock_init(&cfhsi->lock);

        /* Set up the driver. */
        cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
        cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
        cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
        cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

        /* Initialize the work queues. */
        INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
        INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
        INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

        /* Clear all bit fields. */
        clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
        clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
        clear_bit(CFHSI_AWAKE, &cfhsi->bits);

        /* Create work thread. */
        cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
        if (!cfhsi->wq) {
                netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
                        __func__);
                res = -ENODEV;
                goto err_create_wq;
        }

        /* Initialize wait queues. */
        init_waitqueue_head(&cfhsi->wake_up_wait);
        init_waitqueue_head(&cfhsi->wake_down_wait);
        init_waitqueue_head(&cfhsi->flush_fifo_wait);

        /* Setup the inactivity timer. */
        init_timer(&cfhsi->inactivity_timer);
        cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
        cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
        /* Setup the slowpath RX timer. */
        init_timer(&cfhsi->rx_slowpath_timer);
        cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
        cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
        /* Setup the aggregation timer. */
        init_timer(&cfhsi->aggregation_timer);
        cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
        cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

        /* Activate HSI interface. */
        res = cfhsi->ops->cfhsi_up(cfhsi->ops);
        if (res) {
                netdev_err(cfhsi->ndev,
                        "%s: can't activate HSI interface: %d.\n",
                        __func__, res);
                goto err_activate;
        }

        /* Flush FIFO */
        res = cfhsi_flush_fifo(cfhsi);
        if (res) {
                netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
                        __func__, res);
                goto err_net_reg;
        }
        return res;

 err_net_reg:
        cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
        destroy_workqueue(cfhsi->wq);
 err_create_wq:
        kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
        kfree(cfhsi->rx_buf);
 err_alloc_rx:
        kfree(cfhsi->tx_buf);
 err_alloc_tx:
        return res;
}

static int cfhsi_close(struct net_device *ndev)
{
        struct cfhsi *cfhsi = netdev_priv(ndev);
        u8 *tx_buf, *rx_buf, *flip_buf;

        /* going to shutdown driver */
        set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

        /* Flush workqueue */
        flush_workqueue(cfhsi->wq);

        /* Delete timers if pending */
        del_timer_sync(&cfhsi->inactivity_timer);
        del_timer_sync(&cfhsi->rx_slowpath_timer);
        del_timer_sync(&cfhsi->aggregation_timer);

        /* Cancel pending RX request (if any) */
        cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

        /* Destroy workqueue */
        destroy_workqueue(cfhsi->wq);

        /* Store buffers: will be freed later. */
        tx_buf = cfhsi->tx_buf;
        rx_buf = cfhsi->rx_buf;
        flip_buf = cfhsi->rx_flip_buf;
        /* Flush transmit queues. */
        cfhsi_abort_tx(cfhsi);

        /* Deactivate interface */
        cfhsi->ops->cfhsi_down(cfhsi->ops);

        /* Free buffers. */
        kfree(tx_buf);
        kfree(rx_buf);
        kfree(flip_buf);
        return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
        struct cfhsi *cfhsi = netdev_priv(dev);
        ASSERT_RTNL();
        /* Balance the symbol_get(cfhsi_get_ops) in caif_hsi_newlink(). */
        symbol_put(cfhsi_get_ops);
        list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
        .ndo_uninit = cfhsi_uninit,
        .ndo_open = cfhsi_open,
        .ndo_stop = cfhsi_close,
        .ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
        int i;

        if (!data) {
                pr_debug("no params data found\n");
                return;
        }

        i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
        /*
         * Inactivity timeout in millisecs. Lowest possible value is 1,
         * and highest possible is NEXT_TIMER_MAX_DELTA.
         */
        if (data[i]) {
                u32 inactivity_timeout = nla_get_u32(data[i]);
                /* Pre-calculate inactivity timeout. */
                cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
                if (cfhsi->cfg.inactivity_timeout == 0)
                        cfhsi->cfg.inactivity_timeout = 1;
                else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
                        cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
        }

        i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
        if (data[i])
                cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_HEAD_ALIGN;
        if (data[i])
                cfhsi->cfg.head_align = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_TAIL_ALIGN;
        if (data[i])
                cfhsi->cfg.tail_align = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
        if (data[i])
                cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

        i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
        if (data[i])
                cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}
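
/*
 * Illustrative usage (hypothetical command line, assuming an iproute2
 * that can create links of this kind):
 *
 *   ip link add dev cfhsi0 type cfhsi
 *
 * The attributes parsed above arrive as IFLA_INFO_DATA in the
 * RTM_NEWLINK/RTM_SETLINK requests handled by caif_hsi_newlink() and
 * caif_hsi_changelink() below.
 */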

static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
                                struct nlattr *data[])
{
        cfhsi_netlink_parms(data, netdev_priv(dev));
        netdev_state_change(dev);
        return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
        [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
        [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

static size_t caif_hsi_get_size(const struct net_device *dev)
{
        int i;
        size_t s = 0;

        for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
                s += nla_total_size(caif_hsi_policy[i].len);
        return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct cfhsi *cfhsi = netdev_priv(dev);

        if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
                        cfhsi->cfg.inactivity_timeout) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
                        cfhsi->cfg.aggregation_timeout) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
                        cfhsi->cfg.head_align) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
                        cfhsi->cfg.tail_align) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
                        cfhsi->cfg.q_high_mark) ||
            nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
                        cfhsi->cfg.q_low_mark))
                return -EMSGSIZE;

        return 0;
}

static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
{
        struct cfhsi *cfhsi = NULL;
        struct cfhsi_ops *(*get_ops)(void);

        ASSERT_RTNL();

        cfhsi = netdev_priv(dev);
        cfhsi_netlink_parms(data, cfhsi);
        dev_net_set(cfhsi->ndev, src_net);

        get_ops = symbol_get(cfhsi_get_ops);
        if (!get_ops) {
                pr_err("%s: failed to get the cfhsi_ops\n", __func__);
                return -ENODEV;
        }

        /* Assign the HSI device. */
        cfhsi->ops = (*get_ops)();
        if (!cfhsi->ops) {
                pr_err("%s: failed to get the cfhsi_ops\n", __func__);
                goto err;
        }

        /* Assign the driver to this HSI device. */
        cfhsi->ops->cb_ops = &cfhsi->cb_ops;
        if (register_netdevice(dev)) {
                pr_warn("%s: caif_hsi device registration failed\n", __func__);
                goto err;
        }
        /* Add CAIF HSI device to list. */
        list_add_tail(&cfhsi->list, &cfhsi_list);

        return 0;
err:
        symbol_put(cfhsi_get_ops);
        return -ENODEV;
}
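
/*
 * Minimal sketch (illustrative, not a real provider) of the glue layer
 * expected on the other side of cfhsi_ops: it implements the hooks used
 * throughout this file and exports cfhsi_get_ops(), which is resolved
 * with symbol_get() in caif_hsi_newlink(). Callback names are taken
 * from the calls above; everything else is hypothetical.
 *
 *      static int my_hsi_tx(u8 *ptr, int len, struct cfhsi_ops *ops)
 *      {
 *              // start the transfer; call ops->cb_ops->tx_done_cb(ops->cb_ops)
 *              // from the completion handler
 *              return 0;
 *      }
 *
 *      static struct cfhsi_ops my_ops = {
 *              .cfhsi_tx = my_hsi_tx,
 *              // .cfhsi_rx, .cfhsi_up, .cfhsi_down, .cfhsi_wake_up,
 *              // .cfhsi_wake_down, .cfhsi_get_peer_wake,
 *              // .cfhsi_fifo_occupancy and .cfhsi_rx_cancel likewise
 *      };
 *
 *      struct cfhsi_ops *cfhsi_get_ops(void) { return &my_ops; }
 *      EXPORT_SYMBOL(cfhsi_get_ops);
 */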

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
        .kind           = "cfhsi",
        .priv_size      = sizeof(struct cfhsi),
        .setup          = cfhsi_setup,
        .maxtype        = __IFLA_CAIF_HSI_MAX,
        .policy         = caif_hsi_policy,
        .newlink        = caif_hsi_newlink,
        .changelink     = caif_hsi_changelink,
        .get_size       = caif_hsi_get_size,
        .fill_info      = caif_hsi_fill_info,
};

static void __exit cfhsi_exit_module(void)
{
        struct list_head *list_node;
        struct list_head *n;
        struct cfhsi *cfhsi;

        rtnl_link_unregister(&caif_hsi_link_ops);

        rtnl_lock();
        list_for_each_safe(list_node, n, &cfhsi_list) {
                cfhsi = list_entry(list_node, struct cfhsi, list);
                unregister_netdev(cfhsi->ndev);
        }
        rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
        return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);
