linux/drivers/net/can/rx-offload.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

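/* Per-skb private data, stored in skb->cb: the receive timestamp reported
 * by the driver's mailbox_read() callback, used to sort frames before they
 * are handed to the stack.
 */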
struct can_rx_offload_cb {
        u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

        return (struct can_rx_offload_cb *)skb->cb;
}

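/* Helpers to walk the mailbox range in the direction selected by
 * offload->inc: ascending from mb_first to mb_last, or descending.
 */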
static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
        if (offload->inc)
                return a <= b;
        else
                return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
        if (offload->inc)
                return (*val)++;
        else
                return (*val)--;
}

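/* NAPI poll handler: hand queued skbs to the stack, at most @quota per
 * call, updating the rx statistics. If the queue still holds frames after
 * napi_complete_done(), reschedule NAPI instead of waiting for the next
 * interrupt.
 */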
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
        struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        int work_done = 0;

        while ((work_done < quota) &&
               (skb = skb_dequeue(&offload->skb_queue))) {
                struct can_frame *cf = (struct can_frame *)skb->data;

                work_done++;
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
                netif_receive_skb(skb);
        }

        if (work_done < quota) {
                napi_complete_done(napi, work_done);

                /* Check if there was another interrupt */
                if (!skb_queue_empty(&offload->skb_queue))
                        napi_reschedule(&offload->napi);
        }

        can_led_event(offload->dev, CAN_LED_EVENT_RX);

        return work_done;
}

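/* Insert @new into @head so the queue stays ordered according to @compare.
 * The queue is walked from the tail, so for mostly ordered input the
 * insertion point is found quickly. Uses the unlocked __skb_queue helpers,
 * so callers must serialize access to @head themselves.
 */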
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
                                        int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
        struct sk_buff *pos, *insert = NULL;

        skb_queue_reverse_walk(head, pos) {
                const struct can_rx_offload_cb *cb_pos, *cb_new;

                cb_pos = can_rx_offload_get_cb(pos);
                cb_new = can_rx_offload_get_cb(new);

                netdev_dbg(new->dev,
                           "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
                           __func__,
                           cb_pos->timestamp, cb_new->timestamp,
                           cb_new->timestamp - cb_pos->timestamp,
                           skb_queue_len(head));

                if (compare(pos, new) < 0)
                        continue;
                insert = pos;
                break;
        }
        if (!insert)
                __skb_queue_head(head, new);
        else
                __skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
        const struct can_rx_offload_cb *cb_a, *cb_b;

        cb_a = can_rx_offload_get_cb(a);
        cb_b = can_rx_offload_get_cb(b);

        /* Subtract two u32 and return the result as int, to keep the
         * difference steady around the u32 overflow.
         */
        return cb_b->timestamp - cb_a->timestamp;
}

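/* Read a single mailbox. If the software queue is full or no skb can be
 * allocated, the mailbox is still read into a scratch frame so the
 * hardware slot is freed, and the frame is accounted as rx_dropped.
 * Returns the skb on success, NULL if nothing was queued.
 */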
static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
        struct sk_buff *skb = NULL;
        struct can_rx_offload_cb *cb;
        struct can_frame *cf;
        int ret;

        /* If the queue is full or no skb is available, read the mailbox
         * only to discard its contents.
         */
        if (likely(skb_queue_len(&offload->skb_queue) <=
                   offload->skb_queue_len_max))
                skb = alloc_can_skb(offload->dev, &cf);

        if (!skb) {
                struct can_frame cf_overflow;
                u32 timestamp;

                ret = offload->mailbox_read(offload, &cf_overflow,
                                            &timestamp, n);
                if (ret)
                        offload->dev->stats.rx_dropped++;

                return NULL;
        }

        cb = can_rx_offload_get_cb(skb);
        ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
        if (!ret) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

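/* can_rx_offload_irq_offload_timestamp() - read all mailboxes flagged in
 * @pending, sort the resulting frames by timestamp, splice them onto
 * offload->skb_queue and schedule NAPI.
 *
 * Intended to be called from the device's interrupt handler. Returns the
 * number of frames queued in this call.
 */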
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
        struct sk_buff_head skb_queue;
        unsigned int i;

        __skb_queue_head_init(&skb_queue);

        for (i = offload->mb_first;
             can_rx_offload_le(offload, i, offload->mb_last);
             can_rx_offload_inc(offload, &i)) {
                struct sk_buff *skb;

                if (!(pending & BIT_ULL(i)))
                        continue;

                skb = can_rx_offload_offload_one(offload, i);
                if (!skb)
                        break;

                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
        }

        if (!skb_queue_empty(&skb_queue)) {
                unsigned long flags;
                u32 queue_len;

                spin_lock_irqsave(&offload->skb_queue.lock, flags);
                skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
                spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

                queue_len = skb_queue_len(&offload->skb_queue);
                if (queue_len > (offload->skb_queue_len_max / 8))
                        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
                                   __func__, queue_len);

                can_rx_offload_schedule(offload);
        }

        return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

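/* can_rx_offload_irq_offload_fifo() - drain a hardware RX FIFO (mailbox
 * index 0) into offload->skb_queue and schedule NAPI. Returns the number
 * of frames queued.
 */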
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
        struct sk_buff *skb;
        int received = 0;

        while ((skb = can_rx_offload_offload_one(offload, 0))) {
                skb_queue_tail(&offload->skb_queue, skb);
                received++;
        }

        if (received)
                can_rx_offload_schedule(offload);

        return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

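/* can_rx_offload_queue_sorted() - add @skb to the offload queue, sorted by
 * @timestamp, and schedule NAPI. Returns -ENOMEM if the queue already
 * exceeds its limit; the skb is not freed in that case.
 */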
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
                                struct sk_buff *skb, u32 timestamp)
{
        struct can_rx_offload_cb *cb;
        unsigned long flags;

        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max)
                return -ENOMEM;

        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;

        spin_lock_irqsave(&offload->skb_queue.lock, flags);
        __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

        can_rx_offload_schedule(offload);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

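/* can_rx_offload_get_echo_skb() - fetch the TX echo skb for @idx and queue
 * it sorted by @timestamp. Returns the frame length, or 0 if no echo skb
 * was pending. Queueing failures are accounted as rx/tx_fifo errors.
 */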
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
                                         unsigned int idx, u32 timestamp)
{
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        u8 len;
        int err;

        skb = __can_get_echo_skb(dev, idx, &len);
        if (!skb)
                return 0;

        err = can_rx_offload_queue_sorted(offload, skb, timestamp);
        if (err) {
                stats->rx_errors++;
                stats->tx_fifo_errors++;
        }

        return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

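/* can_rx_offload_queue_tail() - append @skb to the offload queue without
 * sorting and schedule NAPI. Returns -ENOMEM if the queue is over its
 * limit; the skb is not freed in that case.
 */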
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb)
{
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max)
                return -ENOMEM;

        skb_queue_tail(&offload->skb_queue, skb);
        can_rx_offload_schedule(offload);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

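/* Common setup: initialize the skb queue, derive the queue limit from the
 * NAPI weight and register the NAPI poll handler.
 */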
static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        offload->dev = dev;

        /* Limit queue len to 4x the weight (rounded to next power of two) */
        offload->skb_queue_len_max = 2 << fls(weight);
        offload->skb_queue_len_max *= 4;
        skb_queue_head_init(&offload->skb_queue);

        can_rx_offload_reset(offload);
        netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

        dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
                __func__, offload->skb_queue_len_max);

        return 0;
}

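/* can_rx_offload_add_timestamp() - set up offload for a driver that reads
 * a range of mailboxes (mb_first..mb_last) and sorts frames by timestamp.
 * The mailbox range also determines the NAPI weight and the iteration
 * direction (offload->inc).
 */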
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
        unsigned int weight;

        if (offload->mb_first > BITS_PER_LONG_LONG ||
            offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
                return -EINVAL;

        if (offload->mb_first < offload->mb_last) {
                offload->inc = true;
                weight = offload->mb_last - offload->mb_first;
        } else {
                offload->inc = false;
                weight = offload->mb_first - offload->mb_last;
        }

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

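/* can_rx_offload_add_fifo() - set up offload for a driver with a simple
 * hardware RX FIFO; @weight is used directly as the NAPI weight.
 */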
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        if (!offload->mailbox_read)
                return -EINVAL;

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
        can_rx_offload_reset(offload);
        napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
        netif_napi_del(&offload->napi);
        skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);
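
/* Usage sketch (illustrative only, not part of this file): roughly how a
 * driver is expected to wire up timestamp-based RX offload. The names
 * my_priv, my_mailbox_read, MY_MB_FIRST/MY_MB_LAST and the surrounding
 * callbacks are hypothetical; real users such as drivers/net/can/flexcan.c
 * differ in detail.
 *
 *     static unsigned int my_mailbox_read(struct can_rx_offload *offload,
 *                                         struct can_frame *cf,
 *                                         u32 *timestamp, unsigned int n)
 *     {
 *             // Read mailbox n from hardware into cf, store its RX
 *             // timestamp in *timestamp, return non-zero if a frame
 *             // was read.
 *     }
 *
 *     // Probe: describe the mailbox range and register the offload.
 *     priv->offload.mailbox_read = my_mailbox_read;
 *     priv->offload.mb_first = MY_MB_FIRST;
 *     priv->offload.mb_last = MY_MB_LAST;
 *     err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *
 *     // ndo_open: enable offload before enabling interrupts.
 *     can_rx_offload_enable(&priv->offload);
 *
 *     // IRQ handler: pass the pending-mailbox bitmask; frames are queued
 *     // sorted by timestamp and delivered from NAPI context.
 *     can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 *
 *     // ndo_stop / remove:
 *     napi_disable(&priv->offload.napi);
 *     can_rx_offload_del(&priv->offload);
 */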