linux/arch/x86/kernel/amd_nb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4     0x1654
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
        {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
        {}
};

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
};

static const struct pci_device_id hygon_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
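
/*
 * Minimal usage sketch: callers normally pair amd_nb_num() with
 * node_to_amd_nb() to walk the cached descriptors once they have been
 * populated by amd_cache_northbridges(); do_something_with() stands in
 * for the caller's own code:
 *
 *      u16 i;
 *
 *      for (i = 0; i < amd_nb_num(); i++) {
 *              struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *              do_something_with(nb->misc);
 *      }
 *
 * node_to_amd_nb() returns NULL only for an out-of-range node.
 */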

static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
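
/*
 * Minimal usage sketch for the SMN accessors above; the SMN register
 * offset 0x12345 is only a placeholder:
 *
 *      u32 val;
 *      int err;
 *
 *      err = amd_smn_read(0, 0x12345, &val);
 *      if (!err)
 *              err = amd_smn_write(0, 0x12345, val | BIT(0));
 *
 * Both helpers return 0 on success and a non-zero error code otherwise
 * (-ENODEV if the node has no cached root device), and both serialize
 * on smn_mutex around the 0x60/0x64 index/data register pair.
 */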

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
        struct pci_dev *F4;
        u32 ficaa;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        F4 = node_to_amd_nb(node)->link;
        if (!F4)
                goto out;

        ficaa  = 1;
        ficaa |= reg & 0x3FC;
        ficaa |= (func & 0x7) << 11;
        ficaa |= instance_id << 16;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(F4, 0x5C, ficaa);
        if (err) {
                pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
                goto out_unlock;
        }

        err = pci_read_config_dword(F4, 0x98, lo);
        if (err)
                pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
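
/*
 * Minimal usage sketch: read a single Data Fabric register through the
 * FICAA/FICAD pair; the node, function, register offset and instance ID
 * below are placeholders:
 *
 *      u32 lo;
 *
 *      if (!amd_df_indirect_read(0, 0, 0x50, 0, &lo))
 *              do_something_with(lo);
 *
 * Per the FICAA word built above: bit 0 selects instance-based access,
 * bits [9:2] carry the register offset, bits [13:11] the function and
 * bits [23:16] the instance ID.
 */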

int amd_cache_northbridges(void)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *link_ids = amd_nb_link_ids;
        const struct pci_device_id *root_ids = amd_root_ids;
        struct pci_dev *root, *misc, *link;
        struct amd_northbridge *nb;
        u16 roots_per_misc = 0;
        u16 misc_count = 0;
        u16 root_count = 0;
        u16 i, j;

        if (amd_northbridges.num)
                return 0;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                root_ids = hygon_root_ids;
                misc_ids = hygon_nb_misc_ids;
                link_ids = hygon_nb_link_ids;
        }

        misc = NULL;
        while ((misc = next_northbridge(misc, misc_ids)) != NULL)
                misc_count++;

        if (!misc_count)
                return -ENODEV;

        root = NULL;
        while ((root = next_northbridge(root, root_ids)) != NULL)
                root_count++;

        if (root_count) {
                roots_per_misc = root_count / misc_count;

                /*
                 * There should be _exactly_ N roots for each DF/SMN
                 * interface.
                 */
                if (!roots_per_misc || (root_count % roots_per_misc)) {
                        pr_info("Unsupported AMD DF/PCI configuration found\n");
                        return -ENODEV;
                }
        }

        nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = misc_count;

        link = misc = root = NULL;
        for (i = 0; i < amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, link_ids);

                /*
                 * If there are more PCI root devices than data fabric/
                 * system management network interfaces, then the (N)
                 * PCI roots per DF/SMN interface are functionally the
                 * same (for DF/SMN access) and N-1 are redundant.  N-1
                 * PCI roots should be skipped per DF/SMN interface so
                 * the following DF/SMN interfaces get mapped to
                 * correct PCI roots.
                 */
                for (j = 1; j < roots_per_misc; j++)
                        root = next_northbridge(root, root_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Check for L3 cache presence.
         */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
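
/*
 * Minimal usage sketch: other code typically calls
 * amd_cache_northbridges() (a no-op once the descriptors exist) and
 * then tests the detected capabilities before using them:
 *
 *      if (amd_cache_northbridges() < 0)
 *              return -ENODEV;
 *
 *      if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 *              return -ENODEV;
 *
 * The initial caching during boot is done by init_amd_nbs() at the end
 * of this file via fs_initcall().
 */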

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                misc_ids = hygon_nb_misc_ids;

        device >>= 16;
        for (id = misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return NULL;

        /* assume all cpus from fam10h have mmconfig */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;
        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

        msr_set_bit(MSR_AMD64_IC_CFG, 3);
        msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
        struct pci_dev *F4;
        u32 val;

        if (boot_cpu_data.x86 != 0x14)
                return;

        if (!amd_northbridges.num)
                return;

        F4 = node_to_amd_nb(0)->link;
        if (!F4)
                return;

        if (pci_read_config_dword(F4, 0x164, &val))
                return;

        if (val & BIT(2))
                return;

        on_each_cpu(__fix_erratum_688, NULL, 0);

        pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        fix_erratum_688();

        return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);