linux/drivers/net/ethernet/mellanox/mlx5/core/health.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
        MLX5_HEALTH_POLL_INTERVAL       = 2 * HZ,
        MAX_MISSES                      = 3,
};

enum {
        MLX5_HEALTH_SYNDR_FW_ERR                = 0x1,
        MLX5_HEALTH_SYNDR_IRISC_ERR             = 0x7,
        MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR  = 0x8,
        MLX5_HEALTH_SYNDR_CRC_ERR               = 0x9,
        MLX5_HEALTH_SYNDR_FETCH_PCI_ERR         = 0xa,
        MLX5_HEALTH_SYNDR_HW_FTL_ERR            = 0xb,
        MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR  = 0xc,
        MLX5_HEALTH_SYNDR_EQ_ERR                = 0xd,
        MLX5_HEALTH_SYNDR_EQ_INV                = 0xe,
        MLX5_HEALTH_SYNDR_FFSER_ERR             = 0xf,
        MLX5_HEALTH_SYNDR_HIGH_TEMP             = 0x10
};

enum {
        MLX5_NIC_IFC_FULL               = 0,
        MLX5_NIC_IFC_DISABLED           = 1,
        MLX5_NIC_IFC_NO_DRAM_NIC        = 2,
        MLX5_NIC_IFC_INVALID            = 3
};

enum {
        MLX5_DROP_NEW_HEALTH_WORK,
};

static u8 get_nic_state(struct mlx5_core_dev *dev)
{
        return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}

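/* Force-complete every command still outstanding.  The vector of busy
 * command slots (the complement of the free-slot bitmask) is passed to
 * the completion handler with MLX5_TRIGGERED_CMD_COMP set, so callers
 * blocked on a dead device get released instead of waiting forever.
 */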
static void trigger_cmd_completions(struct mlx5_core_dev *dev)
{
        unsigned long flags;
        u64 vector;

        /* wait for pending handlers to complete */
        synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
        spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
        vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
        if (!vector)
                goto no_trig;

        vector |= MLX5_TRIGGERED_CMD_COMP;
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

        mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
        mlx5_cmd_comp_handler(dev, vector);
        return;

no_trig:
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

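/* The device is considered fatally wounded when the NIC interface is
 * reported as disabled, or when the firmware version register reads as
 * all ones (the value a PCI read returns once the device has dropped
 * off the bus).
 */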
static int in_fatal(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;
        struct health_buffer __iomem *h = health->health;

        if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
                return 1;

        if (ioread32be(&h->fw_ver) == 0xffffffff)
                return 1;

        return 0;
}

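/* Move the device into the internal-error state, serialized by
 * intf_state_mutex.  If the PCI channel is offline or the health buffer
 * reports a fatal condition, outstanding commands are force-completed;
 * the MLX5_DEV_EVENT_SYS_ERROR event is then broadcast to consumers.
 */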
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
        mutex_lock(&dev->intf_state_mutex);
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                goto unlock;

        mlx5_core_err(dev, "start\n");
        if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
                dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
                trigger_cmd_completions(dev);
        }

        mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
        mlx5_core_err(dev, "end\n");

unlock:
        mutex_unlock(&dev->intf_state_mutex);
}

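/* Log which NIC interface state was actually found (a disabled
 * interface is the expected one at this point) and tear the device down.
 */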
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
        u8 nic_interface = get_nic_state(dev);

        switch (nic_interface) {
        case MLX5_NIC_IFC_FULL:
                mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
                break;

        case MLX5_NIC_IFC_DISABLED:
                mlx5_core_warn(dev, "starting teardown\n");
                break;

        case MLX5_NIC_IFC_NO_DRAM_NIC:
                mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
                break;
        default:
                mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
                               nic_interface);
        }

        mlx5_disable_device(dev);
}

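/* Delayed-work handler for the recovery attempt: abort if the NIC
 * interface state still reads as invalid, otherwise try to recover the
 * device.
 */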
static void health_recover(struct work_struct *work)
{
        struct mlx5_core_health *health;
        struct delayed_work *dwork;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        u8 nic_state;

        dwork = container_of(work, struct delayed_work, work);
        health = container_of(dwork, struct mlx5_core_health, recover_work);
        priv = container_of(health, struct mlx5_priv, health);
        dev = container_of(priv, struct mlx5_core_dev, priv);

        nic_state = get_nic_state(dev);
        if (nic_state == MLX5_NIC_IFC_INVALID) {
                dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
                return;
        }

        dev_err(&dev->pdev->dev, "starting health recovery flow\n");
        mlx5_recover_device(dev);
}

/* How long to wait before the health recovery flow resets the driver (in msecs) */
#define MLX5_RECOVERY_DELAY_MSECS 60000
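/* Work handler queued by the health poll timer on a fatal condition:
 * tear the device down and, unless new health work has been blocked,
 * schedule the recovery attempt MLX5_RECOVERY_DELAY_MSECS later.
 */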
static void health_care(struct work_struct *work)
{
        unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
        struct mlx5_core_health *health;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;

        health = container_of(work, struct mlx5_core_health, work);
        priv = container_of(health, struct mlx5_priv, health);
        dev = container_of(priv, struct mlx5_core_dev, priv);
        mlx5_core_warn(dev, "handling bad device here\n");
        mlx5_handle_bad_state(dev);

        spin_lock(&health->wq_lock);
        if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
                schedule_delayed_work(&health->recover_work, recover_delay);
        else
                dev_err(&dev->pdev->dev,
                        "new health works are not permitted at this stage\n");
        spin_unlock(&health->wq_lock);
}

static const char *hsynd_str(u8 synd)
{
        switch (synd) {
        case MLX5_HEALTH_SYNDR_FW_ERR:
                return "firmware internal error";
        case MLX5_HEALTH_SYNDR_IRISC_ERR:
                return "irisc not responding";
        case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
                return "unrecoverable hardware error";
        case MLX5_HEALTH_SYNDR_CRC_ERR:
                return "firmware CRC error";
        case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
                return "ICM fetch PCI error";
        case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
                return "HW fatal error";
        case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
                return "async EQ buffer overrun";
        case MLX5_HEALTH_SYNDR_EQ_ERR:
                return "EQ error";
        case MLX5_HEALTH_SYNDR_EQ_INV:
                return "Invalid EQ referenced";
        case MLX5_HEALTH_SYNDR_FFSER_ERR:
                return "FFSER error";
        case MLX5_HEALTH_SYNDR_HIGH_TEMP:
                return "High temperature";
        default:
                return "unrecognized error";
        }
}

static u16 get_maj(u32 fw)
{
        return fw >> 28;
}

static u16 get_min(u32 fw)
{
        return fw >> 16 & 0xfff;
}

static u16 get_sub(u32 fw)
{
        return fw & 0xffff;
}

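/* Dump the firmware health buffer (assert variables, firmware version,
 * hardware id and the decoded syndrome) to the kernel log.  Nothing is
 * printed while the syndrome reads zero.
 */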
static void print_health_info(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;
        struct health_buffer __iomem *h = health->health;
        char fw_str[18];
        u32 fw;
        int i;

        /* If the syndrome is 0, the device is OK and there is no need to print the buffer */
        if (!ioread8(&h->synd))
                return;

        for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
                dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));

        dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
        dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
        fw = ioread32be(&h->fw_ver);
        sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
        dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
        dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
        dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
        dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
        dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
}

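/* Compute the next poll time: the fixed MLX5_HEALTH_POLL_INTERVAL plus
 * up to one second (HZ jiffies) of random jitter.
 */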
static unsigned long get_next_poll_jiffies(void)
{
        unsigned long next;

        get_random_bytes(&next, sizeof(next));
        next %= HZ;
        next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

        return next;
}

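/* Health poll timer callback: read the firmware health counter and count
 * consecutive polls in which it did not advance.  After MAX_MISSES misses
 * the health buffer is dumped and the timer is not re-armed; on a fatal
 * condition the health work is queued once, unless new health work has
 * been blocked.
 */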
static void poll_health(unsigned long data)
{
        struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
        struct mlx5_core_health *health = &dev->priv.health;
        u32 count;

        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mod_timer(&health->timer, get_next_poll_jiffies());
                return;
        }

        count = ioread32be(health->health_counter);
        if (count == health->prev)
                ++health->miss_counter;
        else
                health->miss_counter = 0;

        health->prev = count;
        if (health->miss_counter == MAX_MISSES) {
                dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
                print_health_info(dev);
        } else {
                mod_timer(&health->timer, get_next_poll_jiffies());
        }

        if (in_fatal(dev) && !health->sick) {
                health->sick = true;
                print_health_info(dev);
                spin_lock(&health->wq_lock);
                if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
                        queue_work(health->wq, &health->work);
                else
                        dev_err(&dev->pdev->dev,
                                "new health works are not permitted at this stage\n");
                spin_unlock(&health->wq_lock);
        }
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;

        init_timer(&health->timer);
        health->sick = 0;
        clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
        health->health = &dev->iseg->health;
        health->health_counter = &dev->iseg->health_counter;

        health->timer.data = (unsigned long)dev;
        health->timer.function = poll_health;
        health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
        add_timer(&health->timer);
}

void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;

        del_timer_sync(&health->timer);
}

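/* Block any further health or recovery work from being queued, then
 * cancel whatever is already queued and wait for running handlers to
 * finish.
 */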
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;

        spin_lock(&health->wq_lock);
        set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
        spin_unlock(&health->wq_lock);
        cancel_delayed_work_sync(&health->recover_work);
        cancel_work_sync(&health->work);
}

void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;

        destroy_workqueue(health->wq);
}

int mlx5_health_init(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health;
        char *name;

        health = &dev->priv.health;
        name = kmalloc(64, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        strcpy(name, "mlx5_health");
        strcat(name, dev_name(&dev->pdev->dev));
        health->wq = create_singlethread_workqueue(name);
        kfree(name);
        if (!health->wq)
                return -ENOMEM;
        spin_lock_init(&health->wq_lock);
        INIT_WORK(&health->work, health_care);
        INIT_DELAYED_WORK(&health->recover_work, health_recover);

        return 0;
}