linux/arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

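/* GART flush words, one per northbridge, cached by amd_cache_gart() */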
static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        {}
};

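/* { bus, device base, device limit } ranges in which northbridge devices live */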
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

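/* Return the next PCI device after @dev that matches one of @ids, or NULL */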
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

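/*
 * Enumerate all northbridge misc/link devices, cache them in
 * amd_northbridges and set the feature flags for the running CPU family.
 */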
int amd_cache_northbridges(void)
{
        u16 i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        /* some CPU families (e.g. family 0x11) do not support GART */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway. @device holds the PCI vendor ID in the
 * low 16 bits and the device ID in the high 16 bits.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

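/*
 * Read the family 10h+ MMIO configuration space base MSR and describe the
 * MMCONFIG aperture in @res; returns NULL if MMCONFIG is disabled or
 * unsupported.
 */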
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return NULL;

        /* assume all cpus from fam10h have mmconfig */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
        return res;
}

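/*
 * Return the bitmask of enabled L3 subcaches for @cpu's compute unit; the
 * link device register at 0x1d4 holds four subcache-enable bits per
 * compute unit.
 */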
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        cuid = cpu_data(cpu).compute_unit_id;
        return (mask >> (4 * cuid)) & 0xf;
}

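/*
 * Enable or disable individual L3 subcaches for @cpu's compute unit; the
 * low four bits of @mask select which subcaches remain enabled.
 */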
int amd_set_subcaches(int cpu, int mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).compute_unit_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

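/*
 * Cache each northbridge's GART flush word (misc device register 0x9c) so
 * that amd_flush_garts() can write it back with the flush bit set.
 */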
static int amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

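/*
 * Flush the GART TLB on every northbridge by setting bit 0 of the cached
 * flush word and waiting for the hardware to clear it again.
 */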
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that the extra serialization doesn't matter. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

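/* Cache the northbridges and set up the GART flush words at boot */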
static __init int init_amd_nbs(void)
{
        int err = 0;

        err = amd_cache_northbridges();

        if (err < 0)
                pr_notice("Cannot enumerate AMD northbridges\n");

        if (amd_cache_gart() < 0)
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);