linux/drivers/staging/most/net/net.c
// SPDX-License-Identifier: GPL-2.0
/*
 * net.c - Networking component for Mostcore
 *
 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include "most/core.h"

#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)

#define PMHL 5

#define PMS_TELID_UNSEGM_MAMAC  0x0A
#define PMS_FIFONO_MDP          0x01
#define PMS_FIFONO_MEP          0x04
#define PMS_MSGTYPE_DATA        0x04
#define PMS_DEF_PRIO            0
#define MEP_DEF_RETRY           15

#define PMS_FIFONO_MASK         0x07
#define PMS_FIFONO_SHIFT        3
#define PMS_RETRY_SHIFT         4
#define PMS_TELID_MASK          0x0F
#define PMS_TELID_SHIFT         4

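/*
 * Descriptive note (derived from the code below): outgoing Ethernet
 * frames are wrapped into one of two PMS framings before they are
 * handed to the MOST core:
 *
 *   - MEP: the complete Ethernet frame is prepended with an 8-byte
 *          header (FIFO number PMS_FIFONO_MEP).
 *   - MDP/MAMAC: the Ethernet header is stripped and the payload is
 *          carried behind a 16-byte MDP header (FIFO number
 *          PMS_FIFONO_MDP, telegram id PMS_TELID_UNSEGM_MAMAC).
 *
 * On receive, the FIFO number in byte 3 of the buffer selects the
 * format; see PMS_IS_MEP() and pms_is_mamac() below.
 */
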
#define HB(value)               ((u8)((u16)(value) >> 8))
#define LB(value)               ((u8)(value))

#define EXTRACT_BIT_SET(bitset_name, value) \
        (((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)

#define PMS_IS_MEP(buf, len) \
        ((len) > MEP_HDR_LEN && \
         EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)

static inline bool pms_is_mamac(char *buf, u32 len)
{
        return (len > MDP_HDR_LEN &&
                EXTRACT_BIT_SET(PMS_FIFONO, buf[3]) == PMS_FIFONO_MDP &&
                EXTRACT_BIT_SET(PMS_TELID, buf[14]) == PMS_TELID_UNSEGM_MAMAC);
}

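/*
 * Each MOST interface is represented by one network device.  The RX and
 * TX MOST channels backing it are tracked separately; the device is
 * only registered and usable once both directions are linked.
 */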
struct net_dev_channel {
        bool linked;
        int ch_id;
};

struct net_dev_context {
        struct most_interface *iface;
        bool is_mamac;
        struct net_device *dev;
        struct net_dev_channel rx;
        struct net_dev_channel tx;
        struct list_head list;
};

static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
/* serializes probe/disconnect and most_nd_open() (ch->linked = true) */
static struct mutex probe_disc_mt;
/* protects net_devices, ch->linked = false and dev_hold() */
static struct spinlock list_lock;
static struct core_component comp;

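/*
 * Build an MDP/MAMAC telegram from an outgoing Ethernet frame: a 2-byte
 * overall length, the port message header (FIFO PMS_FIFONO_MDP), the
 * 16-bit destination address taken from the low two bytes of the
 * destination MAC, the EtherType, and finally the Ethernet payload.
 * The 14-byte Ethernet header itself is not transmitted.
 */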
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
        u8 *buff = mbo->virt_address;
        static const u8 broadcast[] = { 0x03, 0xFF };
        const u8 *dest_addr = skb->data + 4;
        const u8 *eth_type = skb->data + 12;
        unsigned int payload_len = skb->len - ETH_HLEN;
        unsigned int mdp_len = payload_len + MDP_HDR_LEN;

        if (skb->len < ETH_HLEN) {
                pr_err("drop: packet too small! (%d)\n", skb->len);
                return -EINVAL;
        }

        if (mbo->buffer_length < mdp_len) {
                pr_err("drop: buffer too small! (%d for %d)\n",
                       mbo->buffer_length, mdp_len);
                return -EINVAL;
        }

        if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
                dest_addr = broadcast;

        *buff++ = HB(mdp_len - 2);
        *buff++ = LB(mdp_len - 2);

        *buff++ = PMHL;
        *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
        *buff++ = PMS_DEF_PRIO;
        *buff++ = dest_addr[0];
        *buff++ = dest_addr[1];
        *buff++ = 0x00;

        *buff++ = HB(payload_len + 6);
        *buff++ = LB(payload_len + 6);

        /* end of FPH here */

        *buff++ = eth_type[0];
        *buff++ = eth_type[1];
        *buff++ = 0;
        *buff++ = 0;

        *buff++ = PMS_TELID_UNSEGM_MAMAC << PMS_TELID_SHIFT | HB(payload_len);
        *buff++ = LB(payload_len);

        memcpy(buff, skb->data + ETH_HLEN, payload_len);
        mbo->buffer_length = mdp_len;
        return 0;
}

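/*
 * Build an MEP telegram from an outgoing Ethernet frame: a 2-byte overall
 * length and the port message header (FIFO PMS_FIFONO_MEP, retry count
 * MEP_DEF_RETRY), followed by the complete, unmodified Ethernet frame.
 */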
static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
{
        u8 *buff = mbo->virt_address;
        unsigned int mep_len = skb->len + MEP_HDR_LEN;

        if (mbo->buffer_length < mep_len) {
                pr_err("drop: buffer too small! (%d for %d)\n",
                       mbo->buffer_length, mep_len);
                return -EINVAL;
        }

        *buff++ = HB(mep_len - 2);
        *buff++ = LB(mep_len - 2);

        *buff++ = PMHL;
        *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
        *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
        *buff++ = 0;
        *buff++ = 0;
        *buff++ = 0;

        memcpy(buff, skb->data, skb->len);
        mbo->buffer_length = mep_len;
        return 0;
}

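/*
 * A MAC address whose first four bytes are zero selects MAMAC (MDP)
 * mode; any other valid address selects MEP mode.  In MAMAC mode only
 * the low two bytes of a destination MAC are transmitted (as the
 * address field of the MDP header), and the default MTU is reduced to
 * MAMAC_DATA_LEN.
 */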
static int most_nd_set_mac_address(struct net_device *dev, void *p)
{
        struct net_dev_context *nd = netdev_priv(dev);
        int err = eth_mac_addr(dev, p);

        if (err)
                return err;

        nd->is_mamac =
                (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
                 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);

        /*
         * Set default MTU for the given packet type.
         * It is still possible to change MTU using ip tools afterwards.
         */
        dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;

        return 0;
}

static void on_netinfo(struct most_interface *iface,
                       unsigned char link_stat, unsigned char *mac_addr);

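/*
 * ndo_open: start both MOST channels, then ask the interface driver for
 * the current link status and MAC address via request_netinfo(); the
 * answer arrives asynchronously in on_netinfo().  The device stays
 * dormant until a valid MAC address is known.
 */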
static int most_nd_open(struct net_device *dev)
{
        struct net_dev_context *nd = netdev_priv(dev);
        int ret = 0;

        mutex_lock(&probe_disc_mt);

        if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
                netdev_err(dev, "most_start_channel() failed for RX channel\n");
                ret = -EBUSY;
                goto unlock;
        }

        if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
                netdev_err(dev, "most_start_channel() failed for TX channel\n");
                most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
                ret = -EBUSY;
                goto unlock;
        }

        netif_carrier_off(dev);
        if (is_valid_ether_addr(dev->dev_addr))
                netif_dormant_off(dev);
        else
                netif_dormant_on(dev);
        netif_wake_queue(dev);
        if (nd->iface->request_netinfo)
                nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);

unlock:
        mutex_unlock(&probe_disc_mt);
        return ret;
}

static int most_nd_stop(struct net_device *dev)
{
        struct net_dev_context *nd = netdev_priv(dev);

        netif_stop_queue(dev);
        if (nd->iface->request_netinfo)
                nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
        most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
        most_stop_channel(nd->iface, nd->tx.ch_id, &comp);

        return 0;
}

static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct net_dev_context *nd = netdev_priv(dev);
        struct mbo *mbo;
        int ret;

        mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);

        if (!mbo) {
                netif_stop_queue(dev);
                dev->stats.tx_fifo_errors++;
                return NETDEV_TX_BUSY;
        }

        if (nd->is_mamac)
                ret = skb_to_mamac(skb, mbo);
        else
                ret = skb_to_mep(skb, mbo);

        if (ret) {
                most_put_mbo(mbo);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        most_submit_mbo(mbo);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops most_nd_ops = {
        .ndo_open = most_nd_open,
        .ndo_stop = most_nd_stop,
        .ndo_start_xmit = most_nd_start_xmit,
        .ndo_set_mac_address = most_nd_set_mac_address,
};

static void most_nd_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->netdev_ops = &most_nd_ops;
}

static struct net_dev_context *get_net_dev(struct most_interface *iface)
{
        struct net_dev_context *nd;

        list_for_each_entry(nd, &net_devices, list)
                if (nd->iface == iface)
                        return nd;
        return NULL;
}

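/*
 * Look up the context under list_lock and take a reference on the
 * network device, but only if both channels are linked (i.e. the
 * device is registered).  The caller must drop the reference with
 * dev_put().
 */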
static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
{
        struct net_dev_context *nd;
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        nd = get_net_dev(iface);
        if (nd && nd->rx.linked && nd->tx.linked)
                dev_hold(nd->dev);
        else
                nd = NULL;
        spin_unlock_irqrestore(&list_lock, flags);
        return nd;
}

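/*
 * Channel probing happens in two steps: the network device is allocated
 * when the first asynchronous channel of an interface is linked, and it
 * is registered once the second (opposite-direction) channel arrives.
 */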
static int comp_probe_channel(struct most_interface *iface, int channel_idx,
                              struct most_channel_config *ccfg, char *name)
{
        struct net_dev_context *nd;
        struct net_dev_channel *ch;
        struct net_device *dev;
        unsigned long flags;
        int ret = 0;

        if (!iface)
                return -EINVAL;

        if (ccfg->data_type != MOST_CH_ASYNC)
                return -EINVAL;

        mutex_lock(&probe_disc_mt);
        nd = get_net_dev(iface);
        if (!nd) {
                dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
                                   NET_NAME_UNKNOWN, most_nd_setup);
                if (!dev) {
                        ret = -ENOMEM;
                        goto unlock;
                }

                nd = netdev_priv(dev);
                nd->iface = iface;
                nd->dev = dev;

                spin_lock_irqsave(&list_lock, flags);
                list_add(&nd->list, &net_devices);
                spin_unlock_irqrestore(&list_lock, flags);

                ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
        } else {
                ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
                if (ch->linked) {
                        pr_err("channel direction is already linked\n");
                        ret = -EINVAL;
                        goto unlock;
                }

                if (register_netdev(nd->dev)) {
                        pr_err("register_netdev() failed\n");
                        ret = -EINVAL;
                        goto unlock;
                }
        }
        ch->ch_id = channel_idx;
        ch->linked = true;

unlock:
        mutex_unlock(&probe_disc_mt);
        return ret;
}

static int comp_disconnect_channel(struct most_interface *iface,
                                   int channel_idx)
{
        struct net_dev_context *nd;
        struct net_dev_channel *ch;
        unsigned long flags;
        int ret = 0;

        mutex_lock(&probe_disc_mt);
        nd = get_net_dev(iface);
        if (!nd) {
                ret = -EINVAL;
                goto unlock;
        }

        if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
                ch = &nd->rx;
        } else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
                ch = &nd->tx;
        } else {
                ret = -EINVAL;
                goto unlock;
        }

        if (nd->rx.linked && nd->tx.linked) {
                spin_lock_irqsave(&list_lock, flags);
                ch->linked = false;
                spin_unlock_irqrestore(&list_lock, flags);

                /*
                 * do not call most_stop_channel() here, because channels are
                 * going to be closed in ndo_stop() after unregister_netdev()
                 */
                unregister_netdev(nd->dev);
        } else {
                spin_lock_irqsave(&list_lock, flags);
                list_del(&nd->list);
                spin_unlock_irqrestore(&list_lock, flags);

                free_netdev(nd->dev);
        }

unlock:
        mutex_unlock(&probe_disc_mt);
        return ret;
}

static int comp_resume_tx_channel(struct most_interface *iface,
                                  int channel_idx)
{
        struct net_dev_context *nd;

        nd = get_net_dev_hold(iface);
        if (!nd)
                return 0;

        if (nd->tx.ch_id != channel_idx)
                goto put_nd;

        netif_wake_queue(nd->dev);

put_nd:
        dev_put(nd->dev);
        return 0;
}

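/*
 * RX completion: validate the PMS framing, then turn the buffer back
 * into an Ethernet frame.  MEP buffers already contain a complete frame
 * after the 8-byte header; for MAMAC buffers an Ethernet header is
 * reconstructed (destination = our MAC, source = four zero bytes plus
 * the two address bytes from the MDP header, EtherType from the header)
 * before the payload is copied and handed to netif_rx().
 */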
static int comp_rx_data(struct mbo *mbo)
{
        const u32 zero = 0;
        struct net_dev_context *nd;
        char *buf = mbo->virt_address;
        u32 len = mbo->processed_length;
        struct sk_buff *skb;
        struct net_device *dev;
        unsigned int skb_len;
        int ret = 0;

        nd = get_net_dev_hold(mbo->ifp);
        if (!nd)
                return -EIO;

        if (nd->rx.ch_id != mbo->hdm_channel_id) {
                ret = -EIO;
                goto put_nd;
        }

        dev = nd->dev;

        if (nd->is_mamac) {
                if (!pms_is_mamac(buf, len)) {
                        ret = -EIO;
                        goto put_nd;
                }

                skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
        } else {
                if (!PMS_IS_MEP(buf, len)) {
                        ret = -EIO;
                        goto put_nd;
                }

                skb = dev_alloc_skb(len - MEP_HDR_LEN);
        }

        if (!skb) {
                dev->stats.rx_dropped++;
                pr_err_once("drop packet: no memory for skb\n");
                goto out;
        }

        skb->dev = dev;

        if (nd->is_mamac) {
                /* dest */
                ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

                /* src */
                skb_put_data(skb, &zero, 4);
                skb_put_data(skb, buf + 5, 2);

                /* eth type */
                skb_put_data(skb, buf + 10, 2);

                buf += MDP_HDR_LEN;
                len -= MDP_HDR_LEN;
        } else {
                buf += MEP_HDR_LEN;
                len -= MEP_HDR_LEN;
        }

        skb_put_data(skb, buf, len);
        skb->protocol = eth_type_trans(skb, dev);
        skb_len = skb->len;
        if (netif_rx(skb) == NET_RX_SUCCESS) {
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb_len;
        } else {
                dev->stats.rx_dropped++;
        }

out:
        most_put_mbo(mbo);

put_nd:
        dev_put(nd->dev);
        return ret;
}

static struct core_component comp = {
        .name = "net",
        .probe_channel = comp_probe_channel,
        .disconnect_channel = comp_disconnect_channel,
        .tx_completion = comp_resume_tx_channel,
        .rx_completion = comp_rx_data,
};

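/*
 * Module init/exit only (de)register the component with the MOST core;
 * network devices are created later, when the core probes matching
 * asynchronous channels.
 */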
static int __init most_net_init(void)
{
        spin_lock_init(&list_lock);
        mutex_init(&probe_disc_mt);
        return most_register_component(&comp);
}

static void __exit most_net_exit(void)
{
        most_deregister_component(&comp);
}

/**
 * on_netinfo - callback used by the HDM to report the hardware's MAC and link state
 * @iface: MOST interface instance
 * @link_stat: link status (non-zero means link up)
 * @mac_addr: MAC address reported by the hardware
 */
static void on_netinfo(struct most_interface *iface,
                       unsigned char link_stat, unsigned char *mac_addr)
{
        struct net_dev_context *nd;
        struct net_device *dev;
        const u8 *m = mac_addr;

        nd = get_net_dev_hold(iface);
        if (!nd)
                return;

        dev = nd->dev;

        if (link_stat)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);

        if (m && is_valid_ether_addr(m)) {
                if (!is_valid_ether_addr(dev->dev_addr)) {
                        netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
                                    m[0], m[1], m[2], m[3], m[4], m[5]);
                        ether_addr_copy(dev->dev_addr, m);
                        netif_dormant_off(dev);
                } else if (!ether_addr_equal(dev->dev_addr, m)) {
                        netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
                                    m[0], m[1], m[2], m[3], m[4], m[5]);
                }
        }

        dev_put(nd->dev);
}

module_init(most_net_init);
module_exit(most_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("Networking Component Module for Mostcore");