linux/drivers/acpi/nfit/intel.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

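/*
 * Bus-scope sysfs knob: when 'firmware_activate_noidle' is set, activation
 * requests ND_INTEL_BUS_FWA_IODEV_OS_IDLE instead of asking platform
 * firmware to force-quiesce devices. Toggling the value invalidates the
 * cached activate capability so it is re-evaluated.
 */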
static ssize_t firmware_activate_noidle_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
}

static ssize_t firmware_activate_noidle_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        ssize_t rc;
        bool val;

        rc = kstrtobool(buf, &val);
        if (rc)
                return rc;
        if (val != acpi_desc->fwa_noidle)
                acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
        acpi_desc->fwa_noidle = val;
        return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);

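/*
 * Firmware activation is only advertised when the Intel bus family is
 * enabled and platform firmware implements the full bus-scope
 * firmware-activate command set.
 */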
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        unsigned long *mask;

        if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
                return false;

        mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
        return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}

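/*
 * Translate the Intel "get security state" payload into the generic
 * NVDIMM_SECURITY_* flags for either the user or master passphrase.
 */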
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
                enum nvdimm_passphrase_type ptype)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        unsigned long security_flags = 0;
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_get_security_state cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_out =
                                sizeof(struct nd_intel_get_security_state),
                        .nd_fw_size =
                                sizeof(struct nd_intel_get_security_state),
                },
        };
        int rc;

        if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
                return 0;

        /*
         * Short circuit the state retrieval while we are doing overwrite.
         * The DSM spec states that the security state is indeterminate
         * until the overwrite DSM completes.
         */
        if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
                return BIT(NVDIMM_SECURITY_OVERWRITE);

        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0 || nd_cmd.cmd.status) {
                pr_err("%s: security state retrieval failed (%d:%#x)\n",
                                nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
                return 0;
        }

        /* check and see if security is enabled and locked */
        if (ptype == NVDIMM_MASTER) {
                if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
                        set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
                else
                        set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
                if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
                        set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
                return security_flags;
        }

        if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
                return 0;

        if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
                if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
                    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
                        set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

                if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
                        set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
                else
                        set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
        } else
                set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

        return security_flags;
}

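/* Freeze-lock the DIMM so no further security state changes are accepted. */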
static int intel_security_freeze(struct nvdimm *nvdimm)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_freeze_lock cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_FREEZE_LOCK,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                },
        };
        int rc;

        if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
                return -ENOTTY;

        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
        if (nd_cmd.cmd.status)
                return -EIO;
        return 0;
}

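/*
 * Update the user or master passphrase; the old and new passphrases are
 * passed together in a single fixed-size input payload.
 */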
static int intel_security_change_key(struct nvdimm *nvdimm,
                const struct nvdimm_key_data *old_data,
                const struct nvdimm_key_data *new_data,
                enum nvdimm_passphrase_type ptype)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        unsigned int cmd = ptype == NVDIMM_MASTER ?
                NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
                NVDIMM_INTEL_SET_PASSPHRASE;
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_set_passphrase cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                        .nd_command = cmd,
                },
        };
        int rc;

        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;

        memcpy(nd_cmd.cmd.old_pass, old_data->data,
                        sizeof(nd_cmd.cmd.old_pass));
        memcpy(nd_cmd.cmd.new_pass, new_data->data,
                        sizeof(nd_cmd.cmd.new_pass));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;

        switch (nd_cmd.cmd.status) {
        case 0:
                return 0;
        case ND_INTEL_STATUS_INVALID_PASS:
                return -EINVAL;
        case ND_INTEL_STATUS_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case ND_INTEL_STATUS_INVALID_STATE:
        default:
                return -EIO;
        }
}

static void nvdimm_invalidate_cache(void);

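/*
 * Unlock the DIMM with the user passphrase, then invalidate CPU caches so
 * that lines cached while the DIMM was locked are not consumed.
 */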
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
                const struct nvdimm_key_data *key_data)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_unlock_unit cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                },
        };
        int rc;

        if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
                return -ENOTTY;

        memcpy(nd_cmd.cmd.passphrase, key_data->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
        switch (nd_cmd.cmd.status) {
        case 0:
                break;
        case ND_INTEL_STATUS_INVALID_PASS:
                return -EINVAL;
        default:
                return -EIO;
        }

        /* DIMM unlocked, invalidate all CPU caches before we read it */
        nvdimm_invalidate_cache();

        return 0;
}

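/* Disable the user passphrase, returning the DIMM to the unsecured state. */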
static int intel_security_disable(struct nvdimm *nvdimm,
                const struct nvdimm_key_data *key_data)
{
        int rc;
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_disable_passphrase cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                },
        };

        if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
                return -ENOTTY;

        memcpy(nd_cmd.cmd.passphrase, key_data->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;

        switch (nd_cmd.cmd.status) {
        case 0:
                break;
        case ND_INTEL_STATUS_INVALID_PASS:
                return -EINVAL;
        case ND_INTEL_STATUS_INVALID_STATE:
        default:
                return -ENXIO;
        }

        return 0;
}

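/*
 * Secure-erase the DIMM contents with the user or master passphrase,
 * flushing CPU caches before the erase and invalidating them afterwards.
 */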
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
                const struct nvdimm_key_data *key,
                enum nvdimm_passphrase_type ptype)
{
        int rc;
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        unsigned int cmd = ptype == NVDIMM_MASTER ?
                NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_secure_erase cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                        .nd_command = cmd,
                },
        };

        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;

        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
        memcpy(nd_cmd.cmd.passphrase, key->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;

        switch (nd_cmd.cmd.status) {
        case 0:
                break;
        case ND_INTEL_STATUS_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case ND_INTEL_STATUS_INVALID_PASS:
                return -EINVAL;
        case ND_INTEL_STATUS_INVALID_STATE:
        default:
                return -ENXIO;
        }

        /* DIMM erased, invalidate all CPU caches before we read it */
        nvdimm_invalidate_cache();
        return 0;
}

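/*
 * Poll for completion of a previously started overwrite; -EBUSY indicates
 * the overwrite is still in progress.
 */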
static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
        int rc;
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_query_overwrite cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                },
        };

        if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;

        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;

        switch (nd_cmd.cmd.status) {
        case 0:
                break;
        case ND_INTEL_STATUS_OQUERY_INPROGRESS:
                return -EBUSY;
        default:
                return -ENXIO;
        }

        /* flush all cache before we make the nvdimms available */
        nvdimm_invalidate_cache();
        return 0;
}

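/* Kick off a full-media overwrite; completion is polled via query-overwrite. */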
static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
                const struct nvdimm_key_data *nkey)
{
        int rc;
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_overwrite cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_OVERWRITE,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
                        .nd_size_out = ND_INTEL_STATUS_SIZE,
                        .nd_fw_size = ND_INTEL_STATUS_SIZE,
                },
        };

        if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
                return -ENOTTY;

        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
        memcpy(nd_cmd.cmd.passphrase, nkey->data,
                        sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;

        switch (nd_cmd.cmd.status) {
        case 0:
                return 0;
        case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
                return -ENOTSUPP;
        case ND_INTEL_STATUS_INVALID_PASS:
                return -EINVAL;
        case ND_INTEL_STATUS_INVALID_STATE:
        default:
                return -ENXIO;
        }
}

/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
        wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
        WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif

static const struct nvdimm_security_ops __intel_security_ops = {
        .get_flags = intel_security_flags,
        .freeze = intel_security_freeze,
        .change_key = intel_security_change_key,
        .disable = intel_security_disable,
#ifdef CONFIG_X86
        .unlock = intel_security_unlock,
        .erase = intel_security_erase,
        .overwrite = intel_security_overwrite,
        .query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;

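/* Retrieve the bus-scope firmware activation state/capability/timeout info. */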
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
                struct nd_intel_bus_fw_activate_businfo *info)
{
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_bus_fw_activate_businfo cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
                        .nd_family = NVDIMM_BUS_FAMILY_INTEL,
                        .nd_size_out =
                                sizeof(struct nd_intel_bus_fw_activate_businfo),
                        .nd_fw_size =
                                sizeof(struct nd_intel_bus_fw_activate_businfo),
                },
        };
        int rc;

        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
                        NULL);
        *info = nd_cmd.cmd;
        return rc;
}

/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
                struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nd_intel_bus_fw_activate_businfo info;
        struct device *dev = acpi_desc->dev;
        enum nvdimm_fwa_state state;
        int rc;

        /*
         * It should not be possible for platform firmware to return
         * busy because activate is a synchronous operation. Treat it
         * similar to invalid, i.e. always refresh / poll the status.
         */
        switch (acpi_desc->fwa_state) {
        case NVDIMM_FWA_INVALID:
        case NVDIMM_FWA_BUSY:
                break;
        default:
                /* check if capability needs to be refreshed */
                if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
                        break;
                return acpi_desc->fwa_state;
        }

        /* Refresh with platform firmware */
        rc = intel_bus_fwa_businfo(nd_desc, &info);
        if (rc)
                return NVDIMM_FWA_INVALID;

        switch (info.state) {
        case ND_INTEL_FWA_IDLE:
                state = NVDIMM_FWA_IDLE;
                break;
        case ND_INTEL_FWA_BUSY:
                state = NVDIMM_FWA_BUSY;
                break;
        case ND_INTEL_FWA_ARMED:
                if (info.activate_tmo > info.max_quiesce_tmo)
                        state = NVDIMM_FWA_ARM_OVERFLOW;
                else
                        state = NVDIMM_FWA_ARMED;
                break;
        default:
                dev_err_once(dev, "invalid firmware activate state %d\n",
                                info.state);
                return NVDIMM_FWA_INVALID;
        }

        /*
         * Capability data is available in the same payload as state. It
         * is expected to be static.
         */
        if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
                if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
                        acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
                else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
                        /*
                         * Skip hibernate cycle by default if platform
                         * indicates that it does not need devices to be
                         * quiesced.
                         */
                        acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
                } else
                        acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
        }

        acpi_desc->fwa_state = state;

        return state;
}

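/*
 * Return the cached activate capability, refreshing it via a state query
 * when it has been invalidated.
 */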
static enum nvdimm_fwa_capability intel_bus_fwa_capability(
                struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

        if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
                return acpi_desc->fwa_cap;

        if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
                return acpi_desc->fwa_cap;

        return NVDIMM_FWA_CAP_INVALID;
}

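/*
 * Trigger bus-scope firmware activation. Only proceed from the armed (or
 * arm-overflow) state, and force all cached activation state to be
 * re-read afterwards.
 */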
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_bus_fw_activate cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
                        .nd_family = NVDIMM_BUS_FAMILY_INTEL,
                        .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
                        .nd_size_out =
                                sizeof(struct nd_intel_bus_fw_activate),
                        .nd_fw_size =
                                sizeof(struct nd_intel_bus_fw_activate),
                },
                /*
                 * Even though activate is run from a suspended context,
                 * for safety, still ask platform firmware to force
                 * quiesce devices by default. Let the
                 * 'firmware_activate_noidle' attribute override that
                 * policy.
                 */
                .cmd = {
                        .iodev_state = acpi_desc->fwa_noidle
                                ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
                                : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
                },
        };
        int rc;

        switch (intel_bus_fwa_state(nd_desc)) {
        case NVDIMM_FWA_ARMED:
        case NVDIMM_FWA_ARM_OVERFLOW:
                break;
        default:
                return -ENXIO;
        }

        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
                        NULL);

        /*
         * Whether the command succeeded, or failed, the agent checking
         * for the result needs to query the DIMMs individually.
         * Increment the activation count to invalidate all the DIMM
         * states at once (it's otherwise not possible to take
         * acpi_desc->init_mutex in this context)
         */
        acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
        acpi_desc->fwa_count++;

        dev_dbg(acpi_desc->dev, "result: %d\n", rc);

        return rc;
}

static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
        .activate_state = intel_bus_fwa_state,
        .capability = intel_bus_fwa_capability,
        .activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;

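/* Retrieve the per-DIMM firmware activation state and last result. */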
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
                struct nd_intel_fw_activate_dimminfo *info)
{
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_fw_activate_dimminfo cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_out =
                                sizeof(struct nd_intel_fw_activate_dimminfo),
                        .nd_fw_size =
                                sizeof(struct nd_intel_fw_activate_dimminfo),
                },
        };
        int rc;

        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        *info = nd_cmd.cmd;
        return rc;
}

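/*
 * Map the per-DIMM activation state and result to the generic enums and
 * cache them against the bus-level activation count.
 */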
static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
        struct nd_intel_fw_activate_dimminfo info;
        int rc;

        /*
         * Similar to the bus state, since activate is synchronous the
         * busy state should resolve within the context of 'activate'.
         */
        switch (nfit_mem->fwa_state) {
        case NVDIMM_FWA_INVALID:
        case NVDIMM_FWA_BUSY:
                break;
        default:
                /* If no activations occurred the old state is still valid */
                if (nfit_mem->fwa_count == acpi_desc->fwa_count)
                        return nfit_mem->fwa_state;
        }

        rc = intel_fwa_dimminfo(nvdimm, &info);
        if (rc)
                return NVDIMM_FWA_INVALID;

        switch (info.state) {
        case ND_INTEL_FWA_IDLE:
                nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
                break;
        case ND_INTEL_FWA_BUSY:
                nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
                break;
        case ND_INTEL_FWA_ARMED:
                nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
                break;
        default:
                nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
                break;
        }

        switch (info.result) {
        case ND_INTEL_DIMM_FWA_NONE:
                nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
                break;
        case ND_INTEL_DIMM_FWA_SUCCESS:
                nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
                break;
        case ND_INTEL_DIMM_FWA_NOTSTAGED:
                nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
                break;
        case ND_INTEL_DIMM_FWA_NEEDRESET:
                nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
                break;
        case ND_INTEL_DIMM_FWA_MEDIAFAILED:
        case ND_INTEL_DIMM_FWA_ABORT:
        case ND_INTEL_DIMM_FWA_NOTSUPP:
        case ND_INTEL_DIMM_FWA_ERROR:
        default:
                nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
                break;
        }

        nfit_mem->fwa_count = acpi_desc->fwa_count;

        return nfit_mem->fwa_state;
}

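/* Return the cached activation result if current, otherwise refresh it. */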
static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;

        if (nfit_mem->fwa_count == acpi_desc->fwa_count
                        && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
                return nfit_mem->fwa_result;

        if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
                return nfit_mem->fwa_result;

        return NVDIMM_FWA_RESULT_INVALID;
}

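/*
 * Arm or disarm the DIMM for firmware activation, skipping the command
 * when the device is already in the requested state.
 */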
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
        struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
        struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
        struct {
                struct nd_cmd_pkg pkg;
                struct nd_intel_fw_activate_arm cmd;
        } nd_cmd = {
                .pkg = {
                        .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
                        .nd_family = NVDIMM_FAMILY_INTEL,
                        .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
                        .nd_size_out =
                                sizeof(struct nd_intel_fw_activate_arm),
                        .nd_fw_size =
                                sizeof(struct nd_intel_fw_activate_arm),
                },
                .cmd = {
                        .activate_arm = arm == NVDIMM_FWA_ARM
                                ? ND_INTEL_DIMM_FWA_ARM
                                : ND_INTEL_DIMM_FWA_DISARM,
                },
        };
        int rc;

        switch (intel_fwa_state(nvdimm)) {
        case NVDIMM_FWA_INVALID:
                return -ENXIO;
        case NVDIMM_FWA_BUSY:
                return -EBUSY;
        case NVDIMM_FWA_IDLE:
                if (arm == NVDIMM_FWA_DISARM)
                        return 0;
                break;
        case NVDIMM_FWA_ARMED:
                if (arm == NVDIMM_FWA_ARM)
                        return 0;
                break;
        default:
                return -ENXIO;
        }

        /*
         * Invalidate the bus-level state, now that we're committed to
         * changing the 'arm' state.
         */
        acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
        nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

        dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
                        ? "arm" : "disarm", rc);
        return rc;
}

static const struct nvdimm_fw_ops __intel_fw_ops = {
        .activate_state = intel_fwa_state,
        .activate_result = intel_fwa_result,
        .arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;