linux/arch/x86/kernel/amd_nb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        {}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
};

static const struct pci_device_id hygon_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

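/* Iterate over all PCI devices and return the next one matching @ids. */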
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

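/*
 * SMN accesses go through an index/data register pair in PCI config space
 * on the root device: the target SMN address is written to offset 0x60 and
 * the data is then read from, or written to, offset 0x64.
 */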
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
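
/*
 * Usage sketch (illustrative only; the SMN address below is a made-up
 * placeholder, not a real register):
 *
 *      u32 val;
 *
 *      if (!amd_smn_read(0, 0x12345678, &val))
 *              amd_smn_write(0, 0x12345678, val | BIT(0));
 */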

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's instance ID and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers, but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
        struct pci_dev *F4;
        u32 ficaa;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        F4 = node_to_amd_nb(node)->link;
        if (!F4)
                goto out;

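        /*
         * Build the FICAA value: bit 0 selects targeted (rather than
         * broadcast) access, bits [9:2] carry the dword-aligned register
         * offset, bits [13:11] the PCI function and bits [23:16] the
         * instance ID.
         */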
        ficaa  = 1;
        ficaa |= reg & 0x3FC;
        ficaa |= (func & 0x7) << 11;
        ficaa |= instance_id << 16;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(F4, 0x5C, ficaa);
        if (err) {
                pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
                goto out_unlock;
        }

        err = pci_read_config_dword(F4, 0x98, lo);
        if (err)
                pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
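
/*
 * Usage sketch (illustrative only; the function, register offset and
 * instance ID below are placeholders, not a real DF register):
 *
 *      u32 lo;
 *
 *      if (!amd_df_indirect_read(0, 0, 0x110, 0, &lo))
 *              pr_debug("FICAD LO: 0x%x\n", lo);
 */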
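
/*
 * Enumerate the root, misc and link PCI devices for all northbridges and
 * cache them in amd_northbridges, then set feature flags based on the CPU.
 * Returns 0 on success, -ENODEV if no devices are found, or -ENOMEM on
 * allocation failure.
 */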
int amd_cache_northbridges(void)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *link_ids = amd_nb_link_ids;
        const struct pci_device_id *root_ids = amd_root_ids;
        struct pci_dev *root, *misc, *link;
        struct amd_northbridge *nb;
        u16 roots_per_misc = 0;
        u16 misc_count = 0;
        u16 root_count = 0;
        u16 i, j;

        if (amd_northbridges.num)
                return 0;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                root_ids = hygon_root_ids;
                misc_ids = hygon_nb_misc_ids;
                link_ids = hygon_nb_link_ids;
        }

        misc = NULL;
        while ((misc = next_northbridge(misc, misc_ids)) != NULL)
                misc_count++;

        if (!misc_count)
                return -ENODEV;

        root = NULL;
        while ((root = next_northbridge(root, root_ids)) != NULL)
                root_count++;

        if (root_count) {
                roots_per_misc = root_count / misc_count;

                /*
                 * There should be _exactly_ N roots for each DF/SMN
                 * interface.
                 */
                if (!roots_per_misc || (root_count % roots_per_misc)) {
                        pr_info("Unsupported AMD DF/PCI configuration found\n");
                        return -ENODEV;
                }
        }

        nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = misc_count;

        link = misc = root = NULL;
        for (i = 0; i < amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, link_ids);

                /*
                 * If there are more PCI root devices than data fabric/
                 * system management network interfaces, then the (N)
                 * PCI roots per DF/SMN interface are functionally the
                 * same (for DF/SMN access) and N-1 are redundant.  N-1
                 * PCI roots should be skipped per DF/SMN interface so
                 * the following DF/SMN interfaces get mapped to
                 * correct PCI roots.
                 */
                for (j = 1; j < roots_per_misc; j++)
                        root = next_northbridge(root, root_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Check for L3 cache presence.
         */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                misc_ids = hygon_nb_misc_ids;

        device >>= 16;
        for (id = misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

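/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE. Returns
 * @res filled in, or NULL if MMCONFIG is unsupported or not enabled.
 */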
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return NULL;

        /* Assume all CPUs from family 0x10 on have MMCONFIG. */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* MMCONFIG is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
        return res;
}

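/*
 * Return the 4-bit subcache-enable mask for @cpu, read from the L3
 * partitioning register (0x1d4) on the link device; four bits per
 * cpu_core_id.
 */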
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

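/*
 * Set the 4-bit subcache-enable mask for @cpu. BAN mode is dropped while
 * any subcaches are disabled and restored once the partitioning register
 * returns to its reset state.
 */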
int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

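/* Cache each northbridge's GART flush word (misc device register 0x9c). */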
static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

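/* Flush the GART on all northbridges and wait until the hardware is done. */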
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed,
         * but I'm not sure whether the hardware might lose flush requests
         * while another one is pending. This whole thing is so expensive
         * anyway that a little extra serialization doesn't matter. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

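/*
 * Per-CPU callback for the erratum 688 workaround: set the two IC_CFG bits
 * (3 and 14) that the fix requires.
 */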
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

        msr_set_bit(MSR_AMD64_IC_CFG, 3);
        msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
        struct pci_dev *F4;
        u32 val;

        if (boot_cpu_data.x86 != 0x14)
                return;

        if (!amd_northbridges.num)
                return;

        F4 = node_to_amd_nb(0)->link;
        if (!F4)
                return;

        if (pci_read_config_dword(F4, 0x164, &val))
                return;

        if (val & BIT(2))
                return;

        on_each_cpu(__fix_erratum_688, NULL, 0);

        pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        fix_erratum_688();

        return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);