linux/drivers/cxl/acpi.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxl.h"

static struct acpi_table_header *acpi_cedt;

/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)        (1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)

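/*
 * Translate the CFMWS window restrictions field into the matching
 * CXL_DECODER_F_* flags for the root decoder.
 */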
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
        unsigned long flags = 0;

        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
                flags |= CXL_DECODER_F_TYPE2;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
                flags |= CXL_DECODER_F_TYPE3;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
                flags |= CXL_DECODER_F_RAM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
                flags |= CXL_DECODER_F_PMEM;
        if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
                flags |= CXL_DECODER_F_LOCK;

        return flags;
}

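/*
 * Validate a CFMWS entry: only modulo interleave arithmetic is
 * supported, the base HPA and window size must be 256MB aligned, and
 * the entry must be large enough to hold its interleave target list.
 */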
static int cxl_acpi_cfmws_verify(struct device *dev,
                                 struct acpi_cedt_cfmws *cfmws)
{
        int expected_len;

        if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
                dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
                dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
                dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
                return -EINVAL;
        }

        expected_len = struct_size((cfmws), interleave_targets,
                                   CFMWS_INTERLEAVE_WAYS(cfmws));

        if (cfmws->header.length < expected_len) {
                dev_err(dev, "CFMWS length %d less than expected %d\n",
                        cfmws->header.length, expected_len);
                return -EINVAL;
        }

        if (cfmws->header.length > expected_len)
                dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
                        cfmws->header.length, expected_len);

        return 0;
}

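/*
 * Walk the CEDT subtables and register a root decoder with @root_port
 * for every CFMWS entry that passes cxl_acpi_cfmws_verify(). Invalid
 * entries are logged and skipped rather than failing the whole scan.
 */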
static void cxl_add_cfmws_decoders(struct device *dev,
                                   struct cxl_port *root_port)
{
        struct acpi_cedt_cfmws *cfmws;
        struct cxl_decoder *cxld;
        acpi_size len, cur = 0;
        void *cedt_subtable;
        unsigned long flags;
        int rc;

        len = acpi_cedt->length - sizeof(*acpi_cedt);
        cedt_subtable = acpi_cedt + 1;

        while (cur < len) {
                struct acpi_cedt_header *c = cedt_subtable + cur;

                if (c->type != ACPI_CEDT_TYPE_CFMWS) {
                        cur += c->length;
                        continue;
                }

                cfmws = cedt_subtable + cur;

                if (cfmws->header.length < sizeof(*cfmws)) {
                        dev_warn_once(dev,
                                      "CFMWS entry skipped: invalid length:%u\n",
                                      cfmws->header.length);
                        cur += c->length;
                        continue;
                }

                rc = cxl_acpi_cfmws_verify(dev, cfmws);
                if (rc) {
                        dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
                                cfmws->base_hpa, cfmws->base_hpa +
                                cfmws->window_size - 1);
                        cur += c->length;
                        continue;
                }

                flags = cfmws_to_decoder_flags(cfmws->restrictions);
                cxld = devm_cxl_add_decoder(dev, root_port,
                                            CFMWS_INTERLEAVE_WAYS(cfmws),
                                            cfmws->base_hpa, cfmws->window_size,
                                            CFMWS_INTERLEAVE_WAYS(cfmws),
                                            CFMWS_INTERLEAVE_GRANULARITY(cfmws),
                                            CXL_DECODER_EXPANDER,
                                            flags);

                if (IS_ERR(cxld)) {
                        dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
                                cfmws->base_hpa, cfmws->base_hpa +
                                cfmws->window_size - 1);
                } else {
                        dev_dbg(dev, "add: %s range %#llx-%#llx\n",
                                dev_name(&cxld->dev), cfmws->base_hpa,
                                cfmws->base_hpa + cfmws->window_size - 1);
                }
                cur += c->length;
        }
}

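/*
 * Find the CHBS subtable whose UID matches a host bridge's _UID;
 * duplicate UIDs are warned about and ignored. Returns
 * ERR_PTR(-ENODEV) when no match is found.
 */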
static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
{
        struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
        acpi_size len, cur = 0;
        void *cedt_subtable;

        len = acpi_cedt->length - sizeof(*acpi_cedt);
        cedt_subtable = acpi_cedt + 1;

        while (cur < len) {
                struct acpi_cedt_header *c = cedt_subtable + cur;

                if (c->type != ACPI_CEDT_TYPE_CHBS) {
                        cur += c->length;
                        continue;
                }

                chbs = cedt_subtable + cur;

                if (chbs->header.length < sizeof(*chbs)) {
                        dev_warn_once(dev,
                                      "CHBS entry skipped: invalid length:%u\n",
                                      chbs->header.length);
                        cur += c->length;
                        continue;
                }

                if (chbs->uid != uid) {
                        cur += c->length;
                        continue;
                }

                if (chbs_match) {
                        dev_warn_once(dev,
                                      "CHBS entry skipped: duplicate UID:%u\n",
                                      uid);
                        cur += c->length;
                        continue;
                }

                chbs_match = chbs;
                cur += c->length;
        }

        return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
}

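/*
 * Return the CXL Host Bridge Component Register base from a CHBS
 * entry, or CXL_RESOURCE_NONE when no CHBS was found.
 */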
static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
{
        return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
}

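/* Context handed to match_add_root_ports() while walking a host bridge's root bus */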
struct cxl_walk_context {
        struct device *dev;
        struct pci_bus *root;
        struct cxl_port *port;
        int error;
        int count;
};

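/*
 * pci_walk_bus() callback: register each PCIe Root Port found directly
 * on the host bridge's root bus as a dport of the host bridge port,
 * keyed by its Link Capabilities port number.
 */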
static int match_add_root_ports(struct pci_dev *pdev, void *data)
{
        struct cxl_walk_context *ctx = data;
        struct pci_bus *root_bus = ctx->root;
        struct cxl_port *port = ctx->port;
        int type = pci_pcie_type(pdev);
        struct device *dev = ctx->dev;
        u32 lnkcap, port_num;
        int rc;

        if (pdev->bus != root_bus)
                return 0;
        if (!pci_is_pcie(pdev))
                return 0;
        if (type != PCI_EXP_TYPE_ROOT_PORT)
                return 0;
        if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
                                  &lnkcap) != PCIBIOS_SUCCESSFUL)
                return 0;

        /* TODO walk DVSEC to find component register base */
        port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
        rc = cxl_add_dport(port, &pdev->dev, port_num, CXL_RESOURCE_NONE);
        if (rc) {
                ctx->error = rc;
                return rc;
        }
        ctx->count++;

        dev_dbg(dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

        return 0;
}

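/*
 * Look up the dport previously registered for @dev on @port. Takes the
 * port's device lock while walking the dport list.
 */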
static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device *dev)
{
        struct cxl_dport *dport;

        device_lock(&port->dev);
        list_for_each_entry(dport, &port->dports, list)
                if (dport->dport == dev) {
                        device_unlock(&port->dev);
                        return dport;
                }

        device_unlock(&port->dev);
        return NULL;
}

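/*
 * Return the ACPI device if @dev is a CXL host bridge (an ACPI0016
 * device backing a PCI root), otherwise NULL.
 */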
static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
        struct acpi_device *adev = to_acpi_device(dev);

        if (!acpi_pci_find_root(adev->handle))
                return NULL;

        if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
                return adev;
        return NULL;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
        struct acpi_device *bridge = to_cxl_host_bridge(match);
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_pci_root *pci_root;
        struct cxl_walk_context ctx;
        struct cxl_decoder *cxld;
        struct cxl_dport *dport;
        struct cxl_port *port;

        if (!bridge)
                return 0;

        dport = find_dport_by_dev(root_port, match);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
                return -ENODEV;
        }

        port = devm_cxl_add_port(host, match, dport->component_reg_phys,
                                 root_port);
        if (IS_ERR(port))
                return PTR_ERR(port);
        dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

        /*
         * Note that this lookup already succeeded in
         * to_cxl_host_bridge(), so no need to check for failure here
         */
        pci_root = acpi_pci_find_root(bridge->handle);
        ctx = (struct cxl_walk_context){
                .dev = host,
                .root = pci_root->bus,
                .port = port,
        };
        pci_walk_bus(pci_root->bus, match_add_root_ports, &ctx);

        if (ctx.count == 0)
                return -ENODEV;
        if (ctx.error)
                return ctx.error;

        /* TODO: Scan CHBCR for HDM Decoder resources */

        /*
         * In the single-port host-bridge case there are no HDM decoders
         * in the CHBCR and a 1:1 passthrough decode is implied.
         */
        if (ctx.count == 1) {
                cxld = devm_cxl_add_passthrough_decoder(host, port);
                if (IS_ERR(cxld))
                        return PTR_ERR(cxld);

                dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
        }

        return 0;
}

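/*
 * Register each ACPI0016 host bridge as a dport of the ACPI0017 root
 * port, using its _UID as the port id and its CHBS entry (if any) for
 * the component register base.
 */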
static int add_host_bridge_dport(struct device *match, void *arg)
{
        int rc;
        acpi_status status;
        unsigned long long uid;
        struct acpi_cedt_chbs *chbs;
        struct cxl_port *root_port = arg;
        struct device *host = root_port->dev.parent;
        struct acpi_device *bridge = to_cxl_host_bridge(match);

        if (!bridge)
                return 0;

        status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
                                       &uid);
        if (status != AE_OK) {
                dev_err(host, "unable to retrieve _UID of %s\n",
                        dev_name(match));
                return -ENODEV;
        }

        chbs = cxl_acpi_match_chbs(host, uid);
        if (IS_ERR(chbs))
                dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
                        dev_name(match));

        rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
        if (rc) {
                dev_err(host, "failed to add downstream port: %s\n",
                        dev_name(match));
                return rc;
        }
        dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match));
        return 0;
}

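/*
 * If a root decoder allows persistent memory, register the CXL nvdimm
 * bridge device. Returning 1 stops device_for_each_child() after the
 * first pmem-capable decoder is found.
 */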
static int add_root_nvdimm_bridge(struct device *match, void *data)
{
        struct cxl_decoder *cxld;
        struct cxl_port *root_port = data;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *host = root_port->dev.parent;

        if (!is_root_decoder(match))
                return 0;

        cxld = to_cxl_decoder(match);
        if (!(cxld->flags & CXL_DECODER_F_PMEM))
                return 0;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb)) {
                dev_dbg(host, "failed to register pmem\n");
                return PTR_ERR(cxl_nvb);
        }
        dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
                dev_name(&cxl_nvb->dev));
        return 1;
}

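/*
 * Probe the ACPI0017 device: add the CXL root port, then use the CEDT
 * to enumerate host bridge dports, CFMWS root decoders, and host
 * bridge uports with their PCIe Root Port dports, and finally register
 * the nvdimm bridge when persistent memory support is enabled.
 */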
static int cxl_acpi_probe(struct platform_device *pdev)
{
        int rc;
        acpi_status status;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);

        root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
        if (IS_ERR(root_port))
                return PTR_ERR(root_port);
        dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));

        status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
        if (ACPI_FAILURE(status))
                return -ENXIO;

        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
        if (rc)
                goto out;

        cxl_add_cfmws_decoders(host, root_port);

        /*
         * The root level has been scanned with host bridges as dports; now
         * scan the host bridges for their role as CXL uports to their
         * CXL-capable PCIe Root Ports.
         */
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_uport);
        if (rc)
                goto out;

        if (IS_ENABLED(CONFIG_CXL_PMEM))
                rc = device_for_each_child(&root_port->dev, root_port,
                                           add_root_nvdimm_bridge);

out:
        acpi_put_table(acpi_cedt);
        if (rc < 0)
                return rc;
        return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
        { "ACPI0017", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static struct platform_driver cxl_acpi_driver = {
        .probe = cxl_acpi_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = cxl_acpi_ids,
        },
};

module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);