linux/drivers/net/ethernet/mellanox/mlxsw/core.c
   1/*
   2 * drivers/net/ethernet/mellanox/mlxsw/core.c
   3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
   5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
   6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/device.h>
  40#include <linux/export.h>
  41#include <linux/err.h>
  42#include <linux/if_link.h>
  43#include <linux/netdevice.h>
  44#include <linux/completion.h>
  45#include <linux/skbuff.h>
  46#include <linux/etherdevice.h>
  47#include <linux/types.h>
  48#include <linux/string.h>
  49#include <linux/gfp.h>
  50#include <linux/random.h>
  51#include <linux/jiffies.h>
  52#include <linux/mutex.h>
  53#include <linux/rcupdate.h>
  54#include <linux/slab.h>
  55#include <linux/workqueue.h>
  56#include <asm/byteorder.h>
  57#include <net/devlink.h>
  58#include <trace/events/devlink.h>
  59
  60#include "core.h"
  61#include "item.h"
  62#include "cmd.h"
  63#include "port.h"
  64#include "trap.h"
  65#include "emad.h"
  66#include "reg.h"
  67#include "resources.h"
  68
  69static LIST_HEAD(mlxsw_core_driver_list);
  70static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
  71
  72static const char mlxsw_core_driver_name[] = "mlxsw_core";
  73
  74static struct workqueue_struct *mlxsw_wq;
  75static struct workqueue_struct *mlxsw_owq;
  76
  77struct mlxsw_core_port {
  78        struct devlink_port devlink_port;
  79        void *port_driver_priv;
  80        u8 local_port;
  81};
  82
  83void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
  84{
  85        return mlxsw_core_port->port_driver_priv;
  86}
  87EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
  88
  89static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
  90{
  91        return mlxsw_core_port->port_driver_priv != NULL;
  92}
  93
  94struct mlxsw_core {
  95        struct mlxsw_driver *driver;
  96        const struct mlxsw_bus *bus;
  97        void *bus_priv;
  98        const struct mlxsw_bus_info *bus_info;
  99        struct workqueue_struct *emad_wq;
 100        struct list_head rx_listener_list;
 101        struct list_head event_listener_list;
 102        struct {
 103                atomic64_t tid;
 104                struct list_head trans_list;
 105                spinlock_t trans_list_lock; /* protects trans_list writes */
 106                bool use_emad;
 107        } emad;
 108        struct {
 109                u8 *mapping; /* lag_id+port_index to local_port mapping */
 110        } lag;
 111        struct mlxsw_res res;
 112        struct mlxsw_hwmon *hwmon;
 113        struct mlxsw_thermal *thermal;
 114        struct mlxsw_core_port *ports;
 115        unsigned int max_ports;
 116        bool reload_fail;
 117        unsigned long driver_priv[0];
 118        /* driver_priv has to be always the last item */
 119};
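/* Note: the per-driver private area is carved out of the same allocation
 * as struct mlxsw_core itself (driver_priv[] above), so a single
 * devlink_alloc() in mlxsw_core_bus_device_register() covers both.
 * Sketch of the relationship, mirroring the code further down:
 *
 *	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
 *	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
 *	mlxsw_core = devlink_priv(devlink);
 *	priv = mlxsw_core_driver_priv(mlxsw_core);
 */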
 120
 121#define MLXSW_PORT_MAX_PORTS_DEFAULT    0x40
 122
 123static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
 124{
  125        /* Switch ports are numbered from 1 to the queried value */
 126        if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
 127                mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
 128                                                           MAX_SYSTEM_PORT) + 1;
 129        else
 130                mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
 131
 132        mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
 133                                    sizeof(struct mlxsw_core_port), GFP_KERNEL);
 134        if (!mlxsw_core->ports)
 135                return -ENOMEM;
 136
 137        return 0;
 138}
 139
 140static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
 141{
 142        kfree(mlxsw_core->ports);
 143}
 144
 145unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
 146{
 147        return mlxsw_core->max_ports;
 148}
 149EXPORT_SYMBOL(mlxsw_core_max_ports);
 150
 151void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
 152{
 153        return mlxsw_core->driver_priv;
 154}
 155EXPORT_SYMBOL(mlxsw_core_driver_priv);
 156
 157struct mlxsw_rx_listener_item {
 158        struct list_head list;
 159        struct mlxsw_rx_listener rxl;
 160        void *priv;
 161};
 162
 163struct mlxsw_event_listener_item {
 164        struct list_head list;
 165        struct mlxsw_event_listener el;
 166        void *priv;
 167};
 168
 169/******************
 170 * EMAD processing
 171 ******************/
 172
 173/* emad_eth_hdr_dmac
 174 * Destination MAC in EMAD's Ethernet header.
 175 * Must be set to 01:02:c9:00:00:01
 176 */
 177MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
 178
 179/* emad_eth_hdr_smac
 180 * Source MAC in EMAD's Ethernet header.
 181 * Must be set to 00:02:c9:01:02:03
 182 */
 183MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
 184
 185/* emad_eth_hdr_ethertype
 186 * Ethertype in EMAD's Ethernet header.
 187 * Must be set to 0x8932
 188 */
 189MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
 190
 191/* emad_eth_hdr_mlx_proto
 192 * Mellanox protocol.
 193 * Must be set to 0x0.
 194 */
 195MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
 196
 197/* emad_eth_hdr_ver
 198 * Mellanox protocol version.
 199 * Must be set to 0x0.
 200 */
 201MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
 202
 203/* emad_op_tlv_type
 204 * Type of the TLV.
 205 * Must be set to 0x1 (operation TLV).
 206 */
 207MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
 208
 209/* emad_op_tlv_len
 210 * Length of the operation TLV in u32.
 211 * Must be set to 0x4.
 212 */
 213MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
 214
 215/* emad_op_tlv_dr
 216 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 217 * EMAD. DR TLV must follow.
 218 *
 219 * Note: Currently not supported and must not be set.
 220 */
 221MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
 222
 223/* emad_op_tlv_status
 224 * Returned status in case of EMAD response. Must be set to 0 in case
 225 * of EMAD request.
 226 * 0x0 - success
 227 * 0x1 - device is busy. Requester should retry
 228 * 0x2 - Mellanox protocol version not supported
 229 * 0x3 - unknown TLV
 230 * 0x4 - register not supported
 231 * 0x5 - operation class not supported
 232 * 0x6 - EMAD method not supported
 233 * 0x7 - bad parameter (e.g. port out of range)
 234 * 0x8 - resource not available
 235 * 0x9 - message receipt acknowledgment. Requester should retry
 236 * 0x70 - internal error
 237 */
 238MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
 239
 240/* emad_op_tlv_register_id
 241 * Register ID of register within register TLV.
 242 */
 243MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
 244
 245/* emad_op_tlv_r
  246 * Response bit. Set to 1 to indicate a response; otherwise the EMAD is
  247 * a request.
 247 */
 248MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
 249
 250/* emad_op_tlv_method
 251 * EMAD method type.
 252 * 0x1 - query
 253 * 0x2 - write
 254 * 0x3 - send (currently not supported)
 255 * 0x4 - event
 256 */
 257MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
 258
 259/* emad_op_tlv_class
 260 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 261 */
 262MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
 263
 264/* emad_op_tlv_tid
 265 * EMAD transaction ID. Used for pairing request and response EMADs.
 266 */
 267MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
 268
 269/* emad_reg_tlv_type
 270 * Type of the TLV.
 271 * Must be set to 0x3 (register TLV).
 272 */
 273MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
 274
 275/* emad_reg_tlv_len
  276 * Length of the register TLV in u32.
 277 */
 278MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
 279
 280/* emad_end_tlv_type
 281 * Type of the TLV.
 282 * Must be set to 0x0 (end TLV).
 283 */
 284MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
 285
 286/* emad_end_tlv_len
 287 * Length of the end TLV in u32.
 288 * Must be set to 1.
 289 */
 290MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
 291
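/* Taken together, the items above describe the EMAD frame that
 * mlxsw_emad_construct() below assembles by successive skb_push() calls:
 *
 *	Ethernet header   (MLXSW_EMAD_ETH_HDR_LEN bytes)
 *	Operation TLV     (MLXSW_EMAD_OP_TLV_LEN u32s)
 *	Register TLV      (1 u32 header + reg->len payload bytes)
 *	End TLV           (MLXSW_EMAD_END_TLV_LEN u32s)
 *
 * The bus-specific TX header is prepended separately by the driver's
 * txhdr_construct() callback.
 */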
 292enum mlxsw_core_reg_access_type {
 293        MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
 294        MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
 295};
 296
 297static inline const char *
 298mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
 299{
 300        switch (type) {
 301        case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
 302                return "query";
 303        case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
 304                return "write";
 305        }
 306        BUG();
 307}
 308
 309static void mlxsw_emad_pack_end_tlv(char *end_tlv)
 310{
 311        mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
 312        mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
 313}
 314
 315static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
 316                                    const struct mlxsw_reg_info *reg,
 317                                    char *payload)
 318{
 319        mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
 320        mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
 321        memcpy(reg_tlv + sizeof(u32), payload, reg->len);
 322}
 323
 324static void mlxsw_emad_pack_op_tlv(char *op_tlv,
 325                                   const struct mlxsw_reg_info *reg,
 326                                   enum mlxsw_core_reg_access_type type,
 327                                   u64 tid)
 328{
 329        mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
 330        mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
 331        mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
 332        mlxsw_emad_op_tlv_status_set(op_tlv, 0);
 333        mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
 334        mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
 335        if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
 336                mlxsw_emad_op_tlv_method_set(op_tlv,
 337                                             MLXSW_EMAD_OP_TLV_METHOD_QUERY);
 338        else
 339                mlxsw_emad_op_tlv_method_set(op_tlv,
 340                                             MLXSW_EMAD_OP_TLV_METHOD_WRITE);
 341        mlxsw_emad_op_tlv_class_set(op_tlv,
 342                                    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
 343        mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
 344}
 345
 346static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
 347{
 348        char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
 349
 350        mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
 351        mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
 352        mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
 353        mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
 354        mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
 355
 356        skb_reset_mac_header(skb);
 357
 358        return 0;
 359}
 360
 361static void mlxsw_emad_construct(struct sk_buff *skb,
 362                                 const struct mlxsw_reg_info *reg,
 363                                 char *payload,
 364                                 enum mlxsw_core_reg_access_type type,
 365                                 u64 tid)
 366{
 367        char *buf;
 368
 369        buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
 370        mlxsw_emad_pack_end_tlv(buf);
 371
 372        buf = skb_push(skb, reg->len + sizeof(u32));
 373        mlxsw_emad_pack_reg_tlv(buf, reg, payload);
 374
 375        buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
 376        mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
 377
 378        mlxsw_emad_construct_eth_hdr(skb);
 379}
 380
 381static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
 382{
 383        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
 384}
 385
 386static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
 387{
 388        return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
 389                                      MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
 390}
 391
 392static char *mlxsw_emad_reg_payload(const char *op_tlv)
 393{
 394        return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
 395}
 396
 397static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
 398{
 399        char *op_tlv;
 400
 401        op_tlv = mlxsw_emad_op_tlv(skb);
 402        return mlxsw_emad_op_tlv_tid_get(op_tlv);
 403}
 404
 405static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
 406{
 407        char *op_tlv;
 408
 409        op_tlv = mlxsw_emad_op_tlv(skb);
 410        return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 411}
 412
 413static int mlxsw_emad_process_status(char *op_tlv,
 414                                     enum mlxsw_emad_op_tlv_status *p_status)
 415{
 416        *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
 417
 418        switch (*p_status) {
 419        case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
 420                return 0;
 421        case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
 422        case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
 423                return -EAGAIN;
 424        case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
 425        case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
 426        case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
 427        case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
 428        case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
 429        case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
 430        case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
 431        case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
 432        default:
 433                return -EIO;
 434        }
 435}
 436
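/* EMAD status handling: BUSY and MESSAGE_RECEIPT_ACK are soft failures
 * mapped to -EAGAIN, which the retry logic below turns into a bounded
 * retransmit (up to MLXSW_EMAD_MAX_RETRY attempts); any other non-zero
 * status fails the transaction with -EIO.
 */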
 437static int
 438mlxsw_emad_process_status_skb(struct sk_buff *skb,
 439                              enum mlxsw_emad_op_tlv_status *p_status)
 440{
 441        return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
 442}
 443
 444struct mlxsw_reg_trans {
 445        struct list_head list;
 446        struct list_head bulk_list;
 447        struct mlxsw_core *core;
 448        struct sk_buff *tx_skb;
 449        struct mlxsw_tx_info tx_info;
 450        struct delayed_work timeout_dw;
 451        unsigned int retries;
 452        u64 tid;
 453        struct completion completion;
 454        atomic_t active;
 455        mlxsw_reg_trans_cb_t *cb;
 456        unsigned long cb_priv;
 457        const struct mlxsw_reg_info *reg;
 458        enum mlxsw_core_reg_access_type type;
 459        int err;
 460        enum mlxsw_emad_op_tlv_status emad_status;
 461        struct rcu_head rcu;
 462};
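/* Lifecycle of a struct mlxsw_reg_trans: it is allocated in
 * mlxsw_core_reg_access_emad(), linked on emad.trans_list (under RCU) and
 * on the caller's bulk_list, and transmitted.  It is completed either by a
 * matching response in mlxsw_emad_rx_listener_func() or by the
 * timeout/retry path, and finally reaped in mlxsw_reg_trans_wait() via
 * kfree_rcu().
 */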
 463
 464#define MLXSW_EMAD_TIMEOUT_MS 200
 465
 466static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 467{
 468        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 469
 470        queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 471}
 472
 473static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
 474                               struct mlxsw_reg_trans *trans)
 475{
 476        struct sk_buff *skb;
 477        int err;
 478
 479        skb = skb_copy(trans->tx_skb, GFP_KERNEL);
 480        if (!skb)
 481                return -ENOMEM;
 482
 483        trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
 484                            skb->data + mlxsw_core->driver->txhdr_len,
 485                            skb->len - mlxsw_core->driver->txhdr_len);
 486
 487        atomic_set(&trans->active, 1);
 488        err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
 489        if (err) {
 490                dev_kfree_skb(skb);
 491                return err;
 492        }
 493        mlxsw_emad_trans_timeout_schedule(trans);
 494        return 0;
 495}
 496
 497static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
 498{
 499        struct mlxsw_core *mlxsw_core = trans->core;
 500
 501        dev_kfree_skb(trans->tx_skb);
 502        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
 503        list_del_rcu(&trans->list);
 504        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
 505        trans->err = err;
 506        complete(&trans->completion);
 507}
 508
 509static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
 510                                      struct mlxsw_reg_trans *trans)
 511{
 512        int err;
 513
 514        if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
 515                trans->retries++;
 516                err = mlxsw_emad_transmit(trans->core, trans);
 517                if (err == 0)
 518                        return;
 519        } else {
 520                err = -EIO;
 521        }
 522        mlxsw_emad_trans_finish(trans, err);
 523}
 524
 525static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
 526{
 527        struct mlxsw_reg_trans *trans = container_of(work,
 528                                                     struct mlxsw_reg_trans,
 529                                                     timeout_dw.work);
 530
 531        if (!atomic_dec_and_test(&trans->active))
 532                return;
 533
 534        mlxsw_emad_transmit_retry(trans->core, trans);
 535}
 536
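/* The response path below and the timeout work above race on the same
 * transaction; trans->active arbitrates between them.  Whichever side sees
 * atomic_dec_and_test() return true owns the transaction and either
 * completes it or schedules a retransmit; the other side backs off.
 */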
 537static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
 538                                        struct mlxsw_reg_trans *trans,
 539                                        struct sk_buff *skb)
 540{
 541        int err;
 542
 543        if (!atomic_dec_and_test(&trans->active))
 544                return;
 545
 546        err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
 547        if (err == -EAGAIN) {
 548                mlxsw_emad_transmit_retry(mlxsw_core, trans);
 549        } else {
 550                if (err == 0) {
 551                        char *op_tlv = mlxsw_emad_op_tlv(skb);
 552
 553                        if (trans->cb)
 554                                trans->cb(mlxsw_core,
 555                                          mlxsw_emad_reg_payload(op_tlv),
 556                                          trans->reg->len, trans->cb_priv);
 557                }
 558                mlxsw_emad_trans_finish(trans, err);
 559        }
 560}
 561
 562/* called with rcu read lock held */
 563static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
 564                                        void *priv)
 565{
 566        struct mlxsw_core *mlxsw_core = priv;
 567        struct mlxsw_reg_trans *trans;
 568
 569        trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
 570                            skb->data, skb->len);
 571
 572        if (!mlxsw_emad_is_resp(skb))
 573                goto free_skb;
 574
 575        list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
 576                if (mlxsw_emad_get_tid(skb) == trans->tid) {
 577                        mlxsw_emad_process_response(mlxsw_core, trans, skb);
 578                        break;
 579                }
 580        }
 581
 582free_skb:
 583        dev_kfree_skb(skb);
 584}
 585
 586static const struct mlxsw_listener mlxsw_emad_rx_listener =
 587        MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
 588                  EMAD, DISCARD);
 589
 590static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 591{
 592        struct workqueue_struct *emad_wq;
 593        u64 tid;
 594        int err;
 595
 596        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 597                return 0;
 598
 599        emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
 600        if (!emad_wq)
 601                return -ENOMEM;
 602        mlxsw_core->emad_wq = emad_wq;
 603
 604        /* Set the upper 32 bits of the transaction ID field to a random
 605         * number. This allows us to discard EMADs addressed to other
 606         * devices.
 607         */
 608        get_random_bytes(&tid, 4);
 609        tid <<= 32;
 610        atomic64_set(&mlxsw_core->emad.tid, tid);
 611
 612        INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
 613        spin_lock_init(&mlxsw_core->emad.trans_list_lock);
 614
 615        err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
 616                                       mlxsw_core);
 617        if (err)
 618                return err;
 619
 620        err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
 621        if (err)
 622                goto err_emad_trap_set;
 623        mlxsw_core->emad.use_emad = true;
 624
 625        return 0;
 626
 627err_emad_trap_set:
 628        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
 629                                   mlxsw_core);
 630        destroy_workqueue(mlxsw_core->emad_wq);
 631        return err;
 632}
 633
 634static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
 635{
 637        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 638                return;
 639
 640        mlxsw_core->emad.use_emad = false;
 641        mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
 642                                   mlxsw_core);
 643        destroy_workqueue(mlxsw_core->emad_wq);
 644}
 645
 646static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
 647                                        u16 reg_len)
 648{
 649        struct sk_buff *skb;
 650        u16 emad_len;
 651
 652        emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
 653                    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
 654                    sizeof(u32) + mlxsw_core->driver->txhdr_len);
 655        if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
 656                return NULL;
 657
 658        skb = netdev_alloc_skb(NULL, emad_len);
 659        if (!skb)
 660                return NULL;
 661        memset(skb->data, 0, emad_len);
 662        skb_reserve(skb, emad_len);
 663
 664        return skb;
 665}
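/* The EMAD skb allocated above has room for the whole frame and is fully
 * reserved; mlxsw_emad_construct() and the driver's txhdr_construct() then
 * build the frame back-to-front with skb_push(), so no further headroom
 * handling is needed on the TX path.
 */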
 666
 667static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
 668                                 const struct mlxsw_reg_info *reg,
 669                                 char *payload,
 670                                 enum mlxsw_core_reg_access_type type,
 671                                 struct mlxsw_reg_trans *trans,
 672                                 struct list_head *bulk_list,
 673                                 mlxsw_reg_trans_cb_t *cb,
 674                                 unsigned long cb_priv, u64 tid)
 675{
 676        struct sk_buff *skb;
 677        int err;
 678
 679        dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
 680                tid, reg->id, mlxsw_reg_id_str(reg->id),
 681                mlxsw_core_reg_access_type_str(type));
 682
 683        skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
 684        if (!skb)
 685                return -ENOMEM;
 686
 687        list_add_tail(&trans->bulk_list, bulk_list);
 688        trans->core = mlxsw_core;
 689        trans->tx_skb = skb;
 690        trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
 691        trans->tx_info.is_emad = true;
 692        INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
 693        trans->tid = tid;
 694        init_completion(&trans->completion);
 695        trans->cb = cb;
 696        trans->cb_priv = cb_priv;
 697        trans->reg = reg;
 698        trans->type = type;
 699
 700        mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
 701        mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
 702
 703        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
 704        list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
 705        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
 706        err = mlxsw_emad_transmit(mlxsw_core, trans);
 707        if (err)
 708                goto err_out;
 709        return 0;
 710
 711err_out:
 712        spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
 713        list_del_rcu(&trans->list);
 714        spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
 715        list_del(&trans->bulk_list);
 716        dev_kfree_skb(trans->tx_skb);
 717        return err;
 718}
 719
 720/*****************
 721 * Core functions
 722 *****************/
 723
 724int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
 725{
 726        spin_lock(&mlxsw_core_driver_list_lock);
 727        list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
 728        spin_unlock(&mlxsw_core_driver_list_lock);
 729        return 0;
 730}
 731EXPORT_SYMBOL(mlxsw_core_driver_register);
 732
 733void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
 734{
 735        spin_lock(&mlxsw_core_driver_list_lock);
 736        list_del(&mlxsw_driver->list);
 737        spin_unlock(&mlxsw_core_driver_list_lock);
 738}
 739EXPORT_SYMBOL(mlxsw_core_driver_unregister);
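/* Illustrative sketch (not part of this file) of how a device driver such
 * as mlxsw_spectrum hooks into the core.  Only a minimal subset of the
 * struct mlxsw_driver fields used elsewhere in this file is shown, and all
 * my_*/MY_* names are placeholders:
 *
 *	static struct mlxsw_driver my_driver = {
 *		.kind			= "my_switch",
 *		.priv_size		= sizeof(struct my_priv),
 *		.init			= my_init,
 *		.fini			= my_fini,
 *		.txhdr_construct	= my_txhdr_construct,
 *		.txhdr_len		= MY_TXHDR_LEN,
 *		.profile		= &my_config_profile,
 *	};
 *
 * mlxsw_core_driver_register(&my_driver) is then called from module init,
 * and mlxsw_core_driver_unregister(&my_driver) from module exit.
 */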
 740
 741static struct mlxsw_driver *__driver_find(const char *kind)
 742{
 743        struct mlxsw_driver *mlxsw_driver;
 744
 745        list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
 746                if (strcmp(mlxsw_driver->kind, kind) == 0)
 747                        return mlxsw_driver;
 748        }
 749        return NULL;
 750}
 751
 752static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
 753{
 754        struct mlxsw_driver *mlxsw_driver;
 755
 756        spin_lock(&mlxsw_core_driver_list_lock);
 757        mlxsw_driver = __driver_find(kind);
 758        spin_unlock(&mlxsw_core_driver_list_lock);
 759        return mlxsw_driver;
 760}
 761
 762static void mlxsw_core_driver_put(const char *kind)
 763{
 764        struct mlxsw_driver *mlxsw_driver;
 765
 766        spin_lock(&mlxsw_core_driver_list_lock);
 767        mlxsw_driver = __driver_find(kind);
 768        spin_unlock(&mlxsw_core_driver_list_lock);
 769}
 770
 771static int mlxsw_devlink_port_split(struct devlink *devlink,
 772                                    unsigned int port_index,
 773                                    unsigned int count)
 774{
 775        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 776
 777        if (port_index >= mlxsw_core->max_ports)
 778                return -EINVAL;
 779        if (!mlxsw_core->driver->port_split)
 780                return -EOPNOTSUPP;
 781        return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
 782}
 783
 784static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
 785                                      unsigned int port_index)
 786{
 787        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 788
 789        if (port_index >= mlxsw_core->max_ports)
 790                return -EINVAL;
 791        if (!mlxsw_core->driver->port_unsplit)
 792                return -EOPNOTSUPP;
 793        return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
 794}
 795
 796static int
 797mlxsw_devlink_sb_pool_get(struct devlink *devlink,
 798                          unsigned int sb_index, u16 pool_index,
 799                          struct devlink_sb_pool_info *pool_info)
 800{
 801        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 802        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 803
 804        if (!mlxsw_driver->sb_pool_get)
 805                return -EOPNOTSUPP;
 806        return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
 807                                         pool_index, pool_info);
 808}
 809
 810static int
 811mlxsw_devlink_sb_pool_set(struct devlink *devlink,
 812                          unsigned int sb_index, u16 pool_index, u32 size,
 813                          enum devlink_sb_threshold_type threshold_type)
 814{
 815        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 816        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 817
 818        if (!mlxsw_driver->sb_pool_set)
 819                return -EOPNOTSUPP;
 820        return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
 821                                         pool_index, size, threshold_type);
 822}
 823
 824static void *__dl_port(struct devlink_port *devlink_port)
 825{
 826        return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
 827}
 828
 829static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
 830                                       enum devlink_port_type port_type)
 831{
 832        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 833        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 834        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 835
 836        if (!mlxsw_driver->port_type_set)
 837                return -EOPNOTSUPP;
 838
 839        return mlxsw_driver->port_type_set(mlxsw_core,
 840                                           mlxsw_core_port->local_port,
 841                                           port_type);
 842}
 843
 844static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
 845                                          unsigned int sb_index, u16 pool_index,
 846                                          u32 *p_threshold)
 847{
 848        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 849        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 850        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 851
 852        if (!mlxsw_driver->sb_port_pool_get ||
 853            !mlxsw_core_port_check(mlxsw_core_port))
 854                return -EOPNOTSUPP;
 855        return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
 856                                              pool_index, p_threshold);
 857}
 858
 859static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
 860                                          unsigned int sb_index, u16 pool_index,
 861                                          u32 threshold)
 862{
 863        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 864        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 865        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 866
 867        if (!mlxsw_driver->sb_port_pool_set ||
 868            !mlxsw_core_port_check(mlxsw_core_port))
 869                return -EOPNOTSUPP;
 870        return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
 871                                              pool_index, threshold);
 872}
 873
 874static int
 875mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
 876                                  unsigned int sb_index, u16 tc_index,
 877                                  enum devlink_sb_pool_type pool_type,
 878                                  u16 *p_pool_index, u32 *p_threshold)
 879{
 880        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 881        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 882        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 883
 884        if (!mlxsw_driver->sb_tc_pool_bind_get ||
 885            !mlxsw_core_port_check(mlxsw_core_port))
 886                return -EOPNOTSUPP;
 887        return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
 888                                                 tc_index, pool_type,
 889                                                 p_pool_index, p_threshold);
 890}
 891
 892static int
 893mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
 894                                  unsigned int sb_index, u16 tc_index,
 895                                  enum devlink_sb_pool_type pool_type,
 896                                  u16 pool_index, u32 threshold)
 897{
 898        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 899        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 900        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 901
 902        if (!mlxsw_driver->sb_tc_pool_bind_set ||
 903            !mlxsw_core_port_check(mlxsw_core_port))
 904                return -EOPNOTSUPP;
 905        return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
 906                                                 tc_index, pool_type,
 907                                                 pool_index, threshold);
 908}
 909
 910static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
 911                                         unsigned int sb_index)
 912{
 913        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 914        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 915
 916        if (!mlxsw_driver->sb_occ_snapshot)
 917                return -EOPNOTSUPP;
 918        return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
 919}
 920
 921static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
 922                                          unsigned int sb_index)
 923{
 924        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 925        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 926
 927        if (!mlxsw_driver->sb_occ_max_clear)
 928                return -EOPNOTSUPP;
 929        return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
 930}
 931
 932static int
 933mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
 934                                   unsigned int sb_index, u16 pool_index,
 935                                   u32 *p_cur, u32 *p_max)
 936{
 937        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 938        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 939        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 940
 941        if (!mlxsw_driver->sb_occ_port_pool_get ||
 942            !mlxsw_core_port_check(mlxsw_core_port))
 943                return -EOPNOTSUPP;
 944        return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
 945                                                  pool_index, p_cur, p_max);
 946}
 947
 948static int
 949mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
 950                                      unsigned int sb_index, u16 tc_index,
 951                                      enum devlink_sb_pool_type pool_type,
 952                                      u32 *p_cur, u32 *p_max)
 953{
 954        struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
 955        struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
 956        struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
 957
 958        if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
 959            !mlxsw_core_port_check(mlxsw_core_port))
 960                return -EOPNOTSUPP;
 961        return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
 962                                                     sb_index, tc_index,
 963                                                     pool_type, p_cur, p_max);
 964}
 965
 966static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
 967{
 968        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 969        const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
 970        int err;
 971
 972        if (!mlxsw_bus->reset)
 973                return -EOPNOTSUPP;
 974
 975        mlxsw_core_bus_device_unregister(mlxsw_core, true);
 976        mlxsw_bus->reset(mlxsw_core->bus_priv);
 977        err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
 978                                             mlxsw_core->bus,
 979                                             mlxsw_core->bus_priv, true,
 980                                             devlink);
 981        if (err)
 982                mlxsw_core->reload_fail = true;
 983        return err;
 984}
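/* The reload implementation above tears the device down and brings it back
 * up on the same devlink instance: unregistering with reload=true keeps the
 * devlink object and its registered resources alive, the bus is reset, and
 * registration runs again against the existing devlink.  A failure is
 * latched in reload_fail so that the eventual unregister only frees the
 * devlink.
 */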
 985
 986static const struct devlink_ops mlxsw_devlink_ops = {
 987        .reload                         = mlxsw_devlink_core_bus_device_reload,
 988        .port_type_set                  = mlxsw_devlink_port_type_set,
 989        .port_split                     = mlxsw_devlink_port_split,
 990        .port_unsplit                   = mlxsw_devlink_port_unsplit,
 991        .sb_pool_get                    = mlxsw_devlink_sb_pool_get,
 992        .sb_pool_set                    = mlxsw_devlink_sb_pool_set,
 993        .sb_port_pool_get               = mlxsw_devlink_sb_port_pool_get,
 994        .sb_port_pool_set               = mlxsw_devlink_sb_port_pool_set,
 995        .sb_tc_pool_bind_get            = mlxsw_devlink_sb_tc_pool_bind_get,
 996        .sb_tc_pool_bind_set            = mlxsw_devlink_sb_tc_pool_bind_set,
 997        .sb_occ_snapshot                = mlxsw_devlink_sb_occ_snapshot,
 998        .sb_occ_max_clear               = mlxsw_devlink_sb_occ_max_clear,
 999        .sb_occ_port_pool_get           = mlxsw_devlink_sb_occ_port_pool_get,
1000        .sb_occ_tc_port_bind_get        = mlxsw_devlink_sb_occ_tc_port_bind_get,
1001};
1002
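/* Bring-up order below: bus init, optional driver resource registration,
 * port array, optional LAG mapping, EMAD, devlink registration (skipped on
 * reload), hwmon, thermal, and finally the driver's own init.  The error
 * path unwinds in exactly the reverse order.
 */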
1003int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1004                                   const struct mlxsw_bus *mlxsw_bus,
1005                                   void *bus_priv, bool reload,
1006                                   struct devlink *devlink)
1007{
1008        const char *device_kind = mlxsw_bus_info->device_kind;
1009        struct mlxsw_core *mlxsw_core;
1010        struct mlxsw_driver *mlxsw_driver;
1011        struct mlxsw_res *res;
1012        size_t alloc_size;
1013        int err;
1014
1015        mlxsw_driver = mlxsw_core_driver_get(device_kind);
1016        if (!mlxsw_driver)
1017                return -EINVAL;
1018
1019        if (!reload) {
1020                alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
1021                devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
1022                if (!devlink) {
1023                        err = -ENOMEM;
1024                        goto err_devlink_alloc;
1025                }
1026        }
1027
1028        mlxsw_core = devlink_priv(devlink);
1029        INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
1030        INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
1031        mlxsw_core->driver = mlxsw_driver;
1032        mlxsw_core->bus = mlxsw_bus;
1033        mlxsw_core->bus_priv = bus_priv;
1034        mlxsw_core->bus_info = mlxsw_bus_info;
1035
1036        res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
1037        err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
1038        if (err)
1039                goto err_bus_init;
1040
1041        if (mlxsw_driver->resources_register && !reload) {
1042                err = mlxsw_driver->resources_register(mlxsw_core);
1043                if (err)
1044                        goto err_register_resources;
1045        }
1046
1047        err = mlxsw_ports_init(mlxsw_core);
1048        if (err)
1049                goto err_ports_init;
1050
1051        if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
1052            MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
1053                alloc_size = sizeof(u8) *
1054                        MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
1055                        MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
1056                mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
1057                if (!mlxsw_core->lag.mapping) {
1058                        err = -ENOMEM;
1059                        goto err_alloc_lag_mapping;
1060                }
1061        }
1062
1063        err = mlxsw_emad_init(mlxsw_core);
1064        if (err)
1065                goto err_emad_init;
1066
1067        if (!reload) {
1068                err = devlink_register(devlink, mlxsw_bus_info->dev);
1069                if (err)
1070                        goto err_devlink_register;
1071        }
1072
1073        err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
1074        if (err)
1075                goto err_hwmon_init;
1076
1077        err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
1078                                 &mlxsw_core->thermal);
1079        if (err)
1080                goto err_thermal_init;
1081
1082        if (mlxsw_driver->init) {
1083                err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
1084                if (err)
1085                        goto err_driver_init;
1086        }
1087
1088        return 0;
1089
1090err_driver_init:
1091        mlxsw_thermal_fini(mlxsw_core->thermal);
1092err_thermal_init:
1093err_hwmon_init:
1094        if (!reload)
1095                devlink_unregister(devlink);
1096err_devlink_register:
1097        mlxsw_emad_fini(mlxsw_core);
1098err_emad_init:
1099        kfree(mlxsw_core->lag.mapping);
1100err_alloc_lag_mapping:
1101        mlxsw_ports_fini(mlxsw_core);
1102err_ports_init:
1103        if (!reload)
1104                devlink_resources_unregister(devlink, NULL);
1105err_register_resources:
1106        mlxsw_bus->fini(bus_priv);
1107err_bus_init:
1108        if (!reload)
1109                devlink_free(devlink);
1110err_devlink_alloc:
1111        mlxsw_core_driver_put(device_kind);
1112        return err;
1113}
1114EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1115
1116void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1117                                      bool reload)
1118{
1119        const char *device_kind = mlxsw_core->bus_info->device_kind;
1120        struct devlink *devlink = priv_to_devlink(mlxsw_core);
1121
1122        if (mlxsw_core->reload_fail)
1123                goto reload_fail;
1124
1125        if (mlxsw_core->driver->fini)
1126                mlxsw_core->driver->fini(mlxsw_core);
1127        mlxsw_thermal_fini(mlxsw_core->thermal);
1128        if (!reload)
1129                devlink_unregister(devlink);
1130        mlxsw_emad_fini(mlxsw_core);
1131        kfree(mlxsw_core->lag.mapping);
1132        mlxsw_ports_fini(mlxsw_core);
1133        if (!reload)
1134                devlink_resources_unregister(devlink, NULL);
1135        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
1136        if (reload)
1137                return;
1138reload_fail:
1139        devlink_free(devlink);
1140        mlxsw_core_driver_put(device_kind);
1141}
1142EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1143
1144bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1145                                  const struct mlxsw_tx_info *tx_info)
1146{
1147        return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1148                                                  tx_info);
1149}
1150EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1151
1152int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1153                            const struct mlxsw_tx_info *tx_info)
1154{
1155        return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1156                                             tx_info);
1157}
1158EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1159
1160static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1161                                   const struct mlxsw_rx_listener *rxl_b)
1162{
1163        return (rxl_a->func == rxl_b->func &&
1164                rxl_a->local_port == rxl_b->local_port &&
1165                rxl_a->trap_id == rxl_b->trap_id);
1166}
1167
1168static struct mlxsw_rx_listener_item *
1169__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1170                        const struct mlxsw_rx_listener *rxl,
1171                        void *priv)
1172{
1173        struct mlxsw_rx_listener_item *rxl_item;
1174
1175        list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1176                if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1177                    rxl_item->priv == priv)
1178                        return rxl_item;
1179        }
1180        return NULL;
1181}
1182
1183int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
1184                                    const struct mlxsw_rx_listener *rxl,
1185                                    void *priv)
1186{
1187        struct mlxsw_rx_listener_item *rxl_item;
1188
1189        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1190        if (rxl_item)
1191                return -EEXIST;
1192        rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
1193        if (!rxl_item)
1194                return -ENOMEM;
1195        rxl_item->rxl = *rxl;
1196        rxl_item->priv = priv;
1197
1198        list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
1199        return 0;
1200}
1201EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1202
1203void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
1204                                       const struct mlxsw_rx_listener *rxl,
1205                                       void *priv)
1206{
1207        struct mlxsw_rx_listener_item *rxl_item;
1208
1209        rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1210        if (!rxl_item)
1211                return;
1212        list_del_rcu(&rxl_item->list);
1213        synchronize_rcu();
1214        kfree(rxl_item);
1215}
1216EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1217
1218static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1219                                           void *priv)
1220{
1221        struct mlxsw_event_listener_item *event_listener_item = priv;
1222        struct mlxsw_reg_info reg;
1223        char *payload;
1224        char *op_tlv = mlxsw_emad_op_tlv(skb);
1225        char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1226
1227        reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1228        reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1229        payload = mlxsw_emad_reg_payload(op_tlv);
1230        event_listener_item->el.func(&reg, payload, event_listener_item->priv);
1231        dev_kfree_skb(skb);
1232}
1233
1234static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1235                                      const struct mlxsw_event_listener *el_b)
1236{
1237        return (el_a->func == el_b->func &&
1238                el_a->trap_id == el_b->trap_id);
1239}
1240
1241static struct mlxsw_event_listener_item *
1242__find_event_listener_item(struct mlxsw_core *mlxsw_core,
1243                           const struct mlxsw_event_listener *el,
1244                           void *priv)
1245{
1246        struct mlxsw_event_listener_item *el_item;
1247
1248        list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1249                if (__is_event_listener_equal(&el_item->el, el) &&
1250                    el_item->priv == priv)
1251                        return el_item;
1252        }
1253        return NULL;
1254}
1255
1256int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
1257                                       const struct mlxsw_event_listener *el,
1258                                       void *priv)
1259{
1260        int err;
1261        struct mlxsw_event_listener_item *el_item;
1262        const struct mlxsw_rx_listener rxl = {
1263                .func = mlxsw_core_event_listener_func,
1264                .local_port = MLXSW_PORT_DONT_CARE,
1265                .trap_id = el->trap_id,
1266        };
1267
1268        el_item = __find_event_listener_item(mlxsw_core, el, priv);
1269        if (el_item)
1270                return -EEXIST;
1271        el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
1272        if (!el_item)
1273                return -ENOMEM;
1274        el_item->el = *el;
1275        el_item->priv = priv;
1276
1277        err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
1278        if (err)
1279                goto err_rx_listener_register;
1280
1281        /* No reason to save item if we did not manage to register an RX
1282         * listener for it.
1283         */
1284        list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
1285
1286        return 0;
1287
1288err_rx_listener_register:
1289        kfree(el_item);
1290        return err;
1291}
1292EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1293
1294void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1295                                          const struct mlxsw_event_listener *el,
1296                                          void *priv)
1297{
1298        struct mlxsw_event_listener_item *el_item;
1299        const struct mlxsw_rx_listener rxl = {
1300                .func = mlxsw_core_event_listener_func,
1301                .local_port = MLXSW_PORT_DONT_CARE,
1302                .trap_id = el->trap_id,
1303        };
1304
1305        el_item = __find_event_listener_item(mlxsw_core, el, priv);
1306        if (!el_item)
1307                return;
1308        mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1309        list_del(&el_item->list);
1310        kfree(el_item);
1311}
1312EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1313
1314static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
1315                                        const struct mlxsw_listener *listener,
1316                                        void *priv)
1317{
1318        if (listener->is_event)
1319                return mlxsw_core_event_listener_register(mlxsw_core,
1320                                                &listener->u.event_listener,
1321                                                priv);
1322        else
1323                return mlxsw_core_rx_listener_register(mlxsw_core,
1324                                                &listener->u.rx_listener,
1325                                                priv);
1326}
1327
1328static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
1329                                      const struct mlxsw_listener *listener,
1330                                      void *priv)
1331{
1332        if (listener->is_event)
1333                mlxsw_core_event_listener_unregister(mlxsw_core,
1334                                                     &listener->u.event_listener,
1335                                                     priv);
1336        else
1337                mlxsw_core_rx_listener_unregister(mlxsw_core,
1338                                                  &listener->u.rx_listener,
1339                                                  priv);
1340}
1341
1342int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
1343                             const struct mlxsw_listener *listener, void *priv)
1344{
1345        char hpkt_pl[MLXSW_REG_HPKT_LEN];
1346        int err;
1347
1348        err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
1349        if (err)
1350                return err;
1351
1352        mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
1353                            listener->trap_group, listener->is_ctrl);
 1354        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1355        if (err)
1356                goto err_trap_set;
1357
1358        return 0;
1359
1360err_trap_set:
1361        mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1362        return err;
1363}
1364EXPORT_SYMBOL(mlxsw_core_trap_register);
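/* Illustrative sketch (not part of this file): a driver typically declares
 * its listener with the MLXSW_RXL() helper, mirroring
 * mlxsw_emad_rx_listener above, and registers it against this core
 * instance.  The handler name and trap identifiers below are placeholders:
 *
 *	static void my_rx_func(struct sk_buff *skb, u8 local_port, void *priv);
 *
 *	static const struct mlxsw_listener my_listener =
 *		MLXSW_RXL(my_rx_func, MY_TRAP, TRAP_TO_CPU, false,
 *			  MY_TRAP_GROUP, DISCARD);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &my_listener, priv);
 *	...
 *	mlxsw_core_trap_unregister(mlxsw_core, &my_listener, priv);
 */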
1365
1366void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
1367                                const struct mlxsw_listener *listener,
1368                                void *priv)
1369{
1370        char hpkt_pl[MLXSW_REG_HPKT_LEN];
1371
1372        if (!listener->is_event) {
1373                mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
1374                                    listener->trap_id, listener->trap_group,
1375                                    listener->is_ctrl);
1376                mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1377        }
1378
1379        mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1380}
1381EXPORT_SYMBOL(mlxsw_core_trap_unregister);
1382
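/* Transaction IDs: the upper 32 bits are randomized once per device in
 * mlxsw_emad_init() and the lower 32 bits are a running counter, so e.g. a
 * random seed of 0x12345678 yields TIDs 0x1234567800000001,
 * 0x1234567800000002, ...  Responses carrying a foreign prefix simply do
 * not match any pending transaction and are dropped.
 */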
1383static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
1384{
1385        return atomic64_inc_return(&mlxsw_core->emad.tid);
1386}
1387
1388static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1389                                      const struct mlxsw_reg_info *reg,
1390                                      char *payload,
1391                                      enum mlxsw_core_reg_access_type type,
1392                                      struct list_head *bulk_list,
1393                                      mlxsw_reg_trans_cb_t *cb,
1394                                      unsigned long cb_priv)
1395{
1396        u64 tid = mlxsw_core_tid_get(mlxsw_core);
1397        struct mlxsw_reg_trans *trans;
1398        int err;
1399
1400        trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1401        if (!trans)
1402                return -ENOMEM;
1403
1404        err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1405                                    bulk_list, cb, cb_priv, tid);
1406        if (err) {
1407                kfree(trans);
1408                return err;
1409        }
1410        return 0;
1411}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_reg_info *reg, char *payload,
                          struct list_head *bulk_list,
                          mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
        return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
                                          MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
                                          bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_reg_info *reg, char *payload,
                          struct list_head *bulk_list,
                          mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
        return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
                                          MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
                                          bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
        struct mlxsw_core *mlxsw_core = trans->core;
        int err;

        wait_for_completion(&trans->completion);
        cancel_delayed_work_sync(&trans->timeout_dw);
        err = trans->err;

        if (trans->retries)
                dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
                         trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
        if (err)
                dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
                        trans->tid, trans->reg->id,
                        mlxsw_reg_id_str(trans->reg->id),
                        mlxsw_core_reg_access_type_str(trans->type),
                        trans->emad_status,
                        mlxsw_emad_op_tlv_status_str(trans->emad_status));

        list_del(&trans->bulk_list);
        kfree_rcu(trans, rcu);
        return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
        struct mlxsw_reg_trans *trans;
        struct mlxsw_reg_trans *tmp;
        int sum_err = 0;
        int err;

        list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
                err = mlxsw_reg_trans_wait(trans);
                if (err && sum_err == 0)
                        sum_err = err; /* first error to be returned */
        }
        return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
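
/* Editor's illustrative sketch, not part of the driver: the bulk
 * transaction pattern also used by mlxsw_core_reg_access() below.
 * Several EMAD transactions may be queued on one bulk list and then
 * waited for together. example_trans_cb() and example_bulk_query() are
 * made-up names; the callback signature follows mlxsw_reg_trans_cb_t as
 * used by mlxsw_core_reg_access_cb(), and the hpkt register is only a
 * placeholder payload (a real caller would pack whatever register it
 * needs before queueing the query).
 */
static void example_trans_cb(struct mlxsw_core *mlxsw_core, char *payload,
                             size_t payload_len, unsigned long cb_priv)
{
        char *copy = (char *) cb_priv;  /* caller-provided response buffer */

        memcpy(copy, payload, payload_len);
}

static int example_bulk_query(struct mlxsw_core *mlxsw_core, char *copy)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
        LIST_HEAD(bulk_list);
        int err;

        /* copy is assumed to be at least MLXSW_REG_HPKT_LEN bytes long */
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl,
                                    &bulk_list, example_trans_cb,
                                    (unsigned long) copy);
        if (err)
                return err;
        /* Further queries/writes could be queued on bulk_list here. */
        return mlxsw_reg_trans_bulk_wait(&bulk_list);
}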

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                                     const struct mlxsw_reg_info *reg,
                                     char *payload,
                                     enum mlxsw_core_reg_access_type type)
{
        enum mlxsw_emad_op_tlv_status status;
        int err, n_retry;
        char *in_mbox, *out_mbox, *tmp;

        dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
                reg->id, mlxsw_reg_id_str(reg->id),
                mlxsw_core_reg_access_type_str(type));

        in_mbox = mlxsw_cmd_mbox_alloc();
        if (!in_mbox)
                return -ENOMEM;

        out_mbox = mlxsw_cmd_mbox_alloc();
        if (!out_mbox) {
                err = -ENOMEM;
                goto free_in_mbox;
        }

        mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
                               mlxsw_core_tid_get(mlxsw_core));
        tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
        mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

        n_retry = 0;
retry:
        err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
        if (!err) {
                err = mlxsw_emad_process_status(out_mbox, &status);
                if (err) {
                        if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
                                goto retry;
                        dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
                                status, mlxsw_emad_op_tlv_status_str(status));
                }
        }

        if (!err)
                memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
                       reg->len);

        mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
        mlxsw_cmd_mbox_free(in_mbox);
        if (err)
                dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
                        reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));
        return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
                                     char *payload, size_t payload_len,
                                     unsigned long cb_priv)
{
        char *orig_payload = (char *) cb_priv;

        memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type)
{
        LIST_HEAD(bulk_list);
        int err;

        /* During initialization the EMAD interface is not available to us,
         * so we default to the command interface. We switch to the EMAD
         * interface after setting the appropriate traps.
         */
        if (!mlxsw_core->emad.use_emad)
                return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
                                                 payload, type);

        err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
                                         payload, type, &bulk_list,
                                         mlxsw_core_reg_access_cb,
                                         (unsigned long) payload);
        if (err)
                return err;
        return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
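
/* Editor's illustrative sketch, not part of the driver: the synchronous
 * pattern used by callers of mlxsw_reg_query()/mlxsw_reg_write() - pack
 * a register payload on the stack and issue it in one call. The hpkt
 * arguments mirror mlxsw_core_trap_unregister() above;
 * example_sync_write() is a made-up name and "listener" is assumed to
 * be a valid listener supplied by the caller.
 */
static int example_sync_write(struct mlxsw_core *mlxsw_core,
                              const struct mlxsw_listener *listener)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];

        mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
                            listener->trap_id, listener->trap_group,
                            listener->is_ctrl);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}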

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            struct mlxsw_rx_info *rx_info)
{
        struct mlxsw_rx_listener_item *rxl_item;
        const struct mlxsw_rx_listener *rxl;
        u8 local_port;
        bool found = false;

        if (rx_info->is_lag) {
                dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
                                    __func__, rx_info->u.lag_id,
                                    rx_info->lag_port_index);
                /* Upper layer does not care if the skb came from LAG or not,
                 * so just get the local_port for the lag port and push it up.
                 */
                local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
                                                        rx_info->u.lag_id,
                                                        rx_info->lag_port_index);
        } else {
                local_port = rx_info->u.sys_port;
        }

        dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
                            __func__, local_port, rx_info->trap_id);

        if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
            (local_port >= mlxsw_core->max_ports))
                goto drop;

        rcu_read_lock();
        list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
                rxl = &rxl_item->rxl;
                if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
                     rxl->local_port == local_port) &&
                    rxl->trap_id == rx_info->trap_id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        if (!found)
                goto drop;

        rxl->func(skb, local_port, rxl_item->priv);
        return;

drop:
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
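
/* Editor's illustrative sketch, not part of the driver: the shape of an
 * RX listener function as invoked above via rxl->func(skb, local_port,
 * priv). example_rx_listener_func() is a made-up name, and it is
 * assumed that the listener takes ownership of the skb and that priv
 * points at a counter registered together with the listener.
 */
static void example_rx_listener_func(struct sk_buff *skb, u8 local_port,
                                     void *priv)
{
        unsigned long *pkt_count = priv;

        (*pkt_count)++;         /* account for the trapped packet */
        dev_kfree_skb(skb);     /* consume the skb */
}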

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
                                        u16 lag_id, u8 port_index)
{
        return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
               port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
                                u16 lag_id, u8 port_index, u8 local_port)
{
        int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                 lag_id, port_index);

        mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
                              u16 lag_id, u8 port_index)
{
        int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                 lag_id, port_index);

        return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
                                  u16 lag_id, u8 local_port)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
                int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                         lag_id, i);

                if (mlxsw_core->lag.mapping[index] == local_port)
                        mlxsw_core->lag.mapping[index] = 0;
        }
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
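
/* Editor's illustrative sketch, not part of the driver: the LAG mapping
 * is a flat array indexed as MAX_LAG_MEMBERS * lag_id + port_index, so
 * a port driver records a member when it joins a LAG and the RX path
 * resolves it again (see mlxsw_core_skb_receive() above).
 * example_lag_mapping() is a made-up name and the lag_id/port_index
 * values are arbitrary.
 */
static void example_lag_mapping(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        u16 lag_id = 3;
        u8 port_index = 1;

        mlxsw_core_lag_mapping_set(mlxsw_core, lag_id, port_index,
                                   local_port);
        WARN_ON(mlxsw_core_lag_mapping_get(mlxsw_core, lag_id, port_index) !=
                local_port);
        mlxsw_core_lag_mapping_clear(mlxsw_core, lag_id, local_port);
}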

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
                          enum mlxsw_res_id res_id)
{
        return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
                       enum mlxsw_res_id res_id)
{
        return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
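
/* Editor's illustrative sketch, not part of the driver: callers are
 * expected to check that the firmware reported a resource before
 * reading it. example_res_query() is a made-up name, and the
 * MLXSW_RES_ID_MAX_LAG_MEMBERS spelling is assumed to be the enum
 * behind the MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) usage
 * above.
 */
static u64 example_res_query(struct mlxsw_core *mlxsw_core)
{
        if (!mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_MAX_LAG_MEMBERS))
                return 0;
        return mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_MAX_LAG_MEMBERS);
}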

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
        int err;

        mlxsw_core_port->local_port = local_port;
        err = devlink_port_register(devlink, devlink_port, local_port);
        if (err)
                memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
        return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        devlink_port_unregister(devlink_port);
        memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
                             void *port_driver_priv, struct net_device *dev,
                             bool split, u32 split_group)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        if (split)
                devlink_port_split_set(devlink_port, split_group);
        devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
                            void *port_driver_priv)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
                           void *port_driver_priv)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
                                                u8 local_port)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
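
/* Editor's illustrative sketch, not part of the driver: the expected
 * lifecycle of a core port as driven by a port driver - register the
 * devlink port, bind it to its netdev as an Ethernet port, and tear it
 * down in reverse order. example_port_lifecycle() is a made-up name;
 * dev and port_priv are assumed to be supplied by the caller.
 */
static int example_port_lifecycle(struct mlxsw_core *mlxsw_core,
                                  u8 local_port, struct net_device *dev,
                                  void *port_priv)
{
        int err;

        err = mlxsw_core_port_init(mlxsw_core, local_port);
        if (err)
                return err;
        mlxsw_core_port_eth_set(mlxsw_core, local_port, port_priv, dev,
                                false, 0);

        /* ... port is usable here ... */

        mlxsw_core_port_clear(mlxsw_core, local_port, port_priv);
        mlxsw_core_port_fini(mlxsw_core, local_port);
        return 0;
}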

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
                                    const char *buf, size_t size)
{
        __be32 *m = (__be32 *) buf;
        int i;
        int count = size / sizeof(__be32);

        for (i = count - 1; i >= 0; i--)
                if (m[i])
                        break;
        i++;
        count = i ? i : 1;
        for (i = 0; i < count; i += 4)
                dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
                        i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
                        be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
                   u32 in_mod, bool out_mbox_direct,
                   char *in_mbox, size_t in_mbox_size,
                   char *out_mbox, size_t out_mbox_size)
{
        u8 status;
        int err;

        BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
        if (!mlxsw_core->bus->cmd_exec)
                return -EOPNOTSUPP;

        dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
        if (in_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
        }

        err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
                                        opcode_mod, in_mod, out_mbox_direct,
                                        in_mbox, in_mbox_size,
                                        out_mbox, out_mbox_size, &status);

        if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod, status, mlxsw_cmd_status_str(status));
        } else if (err == -ETIMEDOUT) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod);
        }

        if (!err && out_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
        }
        return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
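
/* Editor's illustrative sketch, not part of the driver: issuing a
 * mailbox-less command through mlxsw_cmd_exec(). example_cmd_exec() is
 * a made-up name; the opcode/modifier values come from the caller (the
 * real definitions live in cmd.h and are normally used through typed
 * wrappers such as mlxsw_cmd_access_reg() above), and it is assumed the
 * underlying bus accepts commands with NULL mailboxes.
 */
static int example_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode,
                            u8 opcode_mod, u32 in_mod)
{
        return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
                              NULL, 0, NULL, 0);
}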

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
        return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
        flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
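
/* Editor's illustrative sketch, not part of the driver: deferring work
 * onto the mlxsw workqueues. Delayed work is queued on the regular
 * workqueue via mlxsw_core_schedule_dw(); work that must execute in
 * order relative to other ordered work goes through
 * mlxsw_core_schedule_work(), which uses the ordered workqueue.
 * example_work_fn()/example_schedule() are made-up names and the 100 ms
 * delay is arbitrary.
 */
static void example_work_fn(struct work_struct *work)
{
        /* deferred work body would go here */
}

static void example_schedule(struct delayed_work *dwork)
{
        INIT_DELAYED_WORK(dwork, example_work_fn);
        mlxsw_core_schedule_dw(dwork, msecs_to_jiffies(100));
}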

int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                             const struct mlxsw_config_profile *profile,
                             u64 *p_single_size, u64 *p_double_size,
                             u64 *p_linear_size)
{
        struct mlxsw_driver *driver = mlxsw_core->driver;

        if (!driver->kvd_sizes_get)
                return -EINVAL;

        return driver->kvd_sizes_get(mlxsw_core, profile,
                                     p_single_size, p_double_size,
                                     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
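
/* Editor's illustrative sketch, not part of the driver: the shape of
 * the kvd_sizes_get() callback a driver may provide.
 * example_kvd_sizes_get() is a made-up name and the fixed sizes are
 * placeholder values only; a real implementation derives them from the
 * config profile and the devlink resource configuration.
 */
static int example_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_config_profile *profile,
                                 u64 *p_single_size, u64 *p_double_size,
                                 u64 *p_linear_size)
{
        *p_single_size = 65536;         /* placeholder */
        *p_double_size = 32768;         /* placeholder */
        *p_linear_size = 16384;         /* placeholder */
        return 0;
}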

static int __init mlxsw_core_module_init(void)
{
        int err;

        mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
        mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
                goto err_alloc_ordered_workqueue;
        }
        return 0;

err_alloc_ordered_workqueue:
        destroy_workqueue(mlxsw_wq);
        return err;
}

static void __exit mlxsw_core_module_exit(void)
{
        destroy_workqueue(mlxsw_owq);
        destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");