linux/arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_DF_F3     0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

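/* Bus/device ranges scanned for NB devices: {bus, dev_base, dev_limit}. */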
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

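/*
 * Return the next PCI device after @dev matching one of the IDs in @ids,
 * or NULL when the device list is exhausted. pci_get_device() drops the
 * reference on the device passed in, so callers can simply iterate from
 * NULL until NULL comes back.
 */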
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

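/*
 * Read or write a 32-bit SMN register through the index/data register
 * pair at offsets 0x60/0x64 of the node's root device. smn_mutex keeps
 * the address and data transactions of concurrent users paired.
 */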
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

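/*
 * amd_smn_read()/amd_smn_write() below are thin wrappers around
 * __amd_smn_rw(). For example (the register offset here is purely
 * illustrative, not a documented SMN address):
 *
 *        u32 val;
 *
 *        if (!amd_smn_read(0, 0x5a000, &val))
 *                pr_info("SMN register: 0x%x\n", val);
 */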
int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance ID and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers, but so far we only need the LO register.
 */
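/*
 * Example (the function/register values are illustrative only): read the
 * FICAD LO value of DF function 0, register 0x1B4, instance 0 on node 0:
 *
 *        u32 lo;
 *
 *        if (!amd_df_indirect_read(0, 0, 0x1B4, 0, &lo))
 *                ...
 */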
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
        struct pci_dev *F4;
        u32 ficaa;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        F4 = node_to_amd_nb(node)->link;
        if (!F4)
                goto out;

        ficaa  = 1;
        ficaa |= reg & 0x3FC;
        ficaa |= (func & 0x7) << 11;
        ficaa |= instance_id << 16;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(F4, 0x5C, ficaa);
        if (err) {
                pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
                goto out_unlock;
        }

        err = pci_read_config_dword(F4, 0x98, lo);
        if (err)
                pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

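/*
 * Count the NB MISC devices, allocate one amd_northbridge per node and
 * cache the root/misc/link PCI devices for each, then set the feature
 * flags (GART, L3 index disable, L3 partitioning) based on CPU family.
 */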
int amd_cache_northbridges(void)
{
        u16 i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *root, *misc, *link;

        if (amd_northbridges.num)
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (!i)
                return -ENODEV;

        nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = root = NULL;
        for (i = 0; i != amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, amd_root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /* Check for L3 cache presence. */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of errata E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

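/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE: the base
 * address plus 1MB per configurable bus. Returns NULL when MMCONFIG is
 * not enabled or not applicable to this CPU.
 */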
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return NULL;

        /* Assume all CPUs from family 0x10 onward have MMCONFIG. */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* MMCONFIG is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

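/*
 * Return the 4-bit subcache mask for @cpu, read from the L3 partitioning
 * register (0x1d4) of its node's link device.
 */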
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

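/*
 * Enable the subcaches in @mask for @cpu. BAN mode is dropped while any
 * subcaches are disabled and restored once the L3 partitioning returns
 * to its reset state.
 */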
int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

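/*
 * Cache the GART flush-control word (register 0x9c of each MISC device)
 * so amd_flush_garts() does not have to re-read it on every flush.
 */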
static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

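/*
 * Flush the GART TLB on all northbridges by setting the flush bit
 * (bit 0 of register 0x9c) and waiting for the hardware to clear it.
 */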
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that additional serialization doesn't matter. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;

                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);