linux/net/dsa/dsa.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

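/* Illustrative sketch (not part of this file): a real tagging protocol
 * driver defines its own dsa_device_ops and hands it to the registration
 * helpers below, typically through the DSA_TAG_DRIVER() and
 * module_dsa_tag_driver() wrappers from include/net/dsa.h. The "example_*"
 * names here are hypothetical placeholders:
 *
 *	static const struct dsa_device_ops example_ops = {
 *		.name	= "example",
 *		.proto	= DSA_TAG_PROTO_EDSA,
 *		.xmit	= example_xmit,
 *		.rcv	= example_rcv,
 *	};
 *
 *	DSA_TAG_DRIVER(example_ops);
 *	module_dsa_tag_driver(example_ops);
 */
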
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

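/* Illustrative sketch (not part of this file): callers are expected to pair
 * dsa_tag_driver_get() with dsa_tag_driver_put() so the tagging module's
 * reference count stays balanced; roughly:
 *
 *	const struct dsa_device_ops *tag_ops;
 *
 *	tag_ops = dsa_tag_driver_get(tag_protocol);
 *	if (IS_ERR(tag_ops))
 *		return PTR_ERR(tag_ops);
 *	...
 *	dsa_tag_driver_put(tag_ops);
 */
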
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

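/* Note for callers: dsa_dev_to_net_device() returns the net_device with a
 * reference taken via dev_hold(), so the caller must drop it with dev_put()
 * when done, e.g.:
 *
 *	struct net_device *master;
 *
 *	master = dsa_dev_to_net_device(dev);
 *	if (master) {
 *		...
 *		dev_put(master);
 *	}
 */
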
/* Determine if we should defer delivery of skb until we have an rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However, in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

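/* dsa_switch_rcv() is the packet_type handler for ETH_P_XDSA frames (see
 * dsa_pack_type below). It hands the skb to the tag driver's rcv() hook,
 * which is expected to decode the switch tag, set skb->dev to the slave
 * net_device of the source port, strip the tag, and return the skb, or
 * return NULL to have the frame dropped here.
 */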
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	const struct dsa_port *dp = dsa_to_port(ds, p);

	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);

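/* Illustrative sketch (not part of this file): a subscriber to this chain
 * registers a standard notifier_block; the "example_*" names below are
 * hypothetical:
 *
 *	static int example_dsa_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct dsa_notifier_info *info = ptr;
 *
 *		netdev_info(info->dev, "DSA event %lu\n", event);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_dsa_nb = {
 *		.notifier_call = example_dsa_event,
 *	};
 *
 *	register_dsa_notifier(&example_dsa_nb);
 */
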
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_switch *ds;

	dl_priv = devlink_priv(dl);
	ds = dl_priv->ds;

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_switch *ds;

	dl_priv = devlink_priv(dl);
	ds = dl_priv->ds;

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

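/* Illustrative sketch (not part of this file): a switch driver that exposes
 * devlink parameters declares a devlink_param array, implements the
 * ds->ops->devlink_param_get/set hooks wrapped above, and registers the
 * array from its setup path. The "example_*" names are hypothetical and the
 * array contents are elided:
 *
 *	static const struct devlink_param example_params[] = { ... };
 *
 *	static int example_setup(struct dsa_switch *ds)
 *	{
 *		return dsa_devlink_params_register(ds, example_params,
 *						   ARRAY_SIZE(example_params));
 *	}
 */
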
int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink, NULL);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");