linux/drivers/dma/ioat/dca.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in the kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "dma.h"
#include "registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit.  If it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the
 * valid bit is not set, the entry must be 0 or 1 and is used directly as
 * that bit of the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/* verify that the tag map matches the expected values */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
        return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
                (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
                (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
                (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
                (tag_map[4] == DCA2_TAG_MAP_BYTE4));
}

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device, so their tag mappings must be hard-coded in software.
 */

#define APICID_BIT(x)           (DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN        8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
        1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
        1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
        1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };

/* pack PCI B/D/F into a u16 */
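/* e.g. bus 0x05, devfn 0xfa (device 0x1f, function 2) packs to 0x05fa */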
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
        return (pci->bus->number << 8) | pci->devfn;
}

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
        /* CPUID level 9 returns DCA configuration */
        /* Bit 0 indicates DCA enabled by the BIOS */
        unsigned long cpuid_level_9;
        int res;

        cpuid_level_9 = cpuid_eax(9);
        res = test_bit(0, &cpuid_level_9);
        if (!res)
                dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

        return res;
}

int system_has_dca_enabled(struct pci_dev *pdev)
{
        if (boot_cpu_has(X86_FEATURE_DCA))
                return dca_enabled_in_bios(pdev);

        dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
        return 0;
}

struct ioat_dca_slot {
        struct pci_dev *pdev;   /* requester device */
        u16 rid;                /* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

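/*
 * Per-provider private data.  req_slots[] is a variable-length tail: the
 * *_dca_init() routines below allocate max_requesters entries along with
 * the enclosing dca_provider.
 */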
struct ioat_dca_priv {
        void __iomem            *iobase;
        void __iomem            *dca_base;
        int                      max_requesters;
        int                      requester_count;
        u8                       tag_map[IOAT_TAG_MAP_LEN];
        struct ioat_dca_slot     req_slots[0];
};

/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]       PCI-Express Bus Number
 * [7:3]        PCI-Express Device Number
 * [2:0]        PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]        Reserved (0)
 * [0]          Ignore Function Number
 */

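/*
 * ioat_dca_add_requester() fills one table entry per requester: the 16-bit
 * requester ID at offset i * 4, with the control byte cleared so the
 * function number is not ignored.
 */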
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;
        u16 id;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);
        id = dcaid_from_pcidev(pdev);

        if (ioatdca->requester_count == ioatdca->max_requesters)
                return -ENODEV;

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == NULL) {
                        /* found an empty slot */
                        ioatdca->requester_count++;
                        ioatdca->req_slots[i].pdev = pdev;
                        ioatdca->req_slots[i].rid = id;
                        writew(id, ioatdca->dca_base + (i * 4));
                        /* make sure the ignore function bit is off */
                        writeb(0, ioatdca->dca_base + (i * 4) + 2);
                        return i;
                }
        }
        /* Error, ioatdca->requester_count is out of whack */
        return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
                                     struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == pdev) {
                        writew(0, ioatdca->dca_base + (i * 4));
                        ioatdca->req_slots[i].pdev = NULL;
                        ioatdca->req_slots[i].rid = 0;
                        ioatdca->requester_count--;
                        return i;
                }
        }
        return -ENODEV;
}

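/*
 * Build the DCA tag for a cpu from the tag map: each entry either selects
 * one bit of the cpu's APIC ID (valid bit set) or supplies a literal 0/1,
 * and the result lands in the corresponding bit of the tag.  For example,
 * with the BNB map { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2) } and
 * APIC ID 0x6, the resulting tag is 0xf.
 */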
static u8 ioat_dca_get_tag(struct dca_provider *dca,
                           struct device *dev,
                           int cpu)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        int i, apic_id, bit, value;
        u8 entry, tag;

        tag = 0;
        apic_id = cpu_physical_id(cpu);

        for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
                entry = ioatdca->tag_map[i];
                if (entry & DCA_TAG_MAP_VALID) {
                        bit = entry & ~DCA_TAG_MAP_VALID;
                        value = (apic_id & (1 << bit)) ? 1 : 0;
                } else {
                        value = entry ? 1 : 0;
                }
                tag |= (value << i);
        }
        return tag;
}

static int ioat_dca_dev_managed(struct dca_provider *dca,
                                struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;

        pdev = to_pci_dev(dev);
        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == pdev)
                        return 1;
        }
        return 0;
}

static struct dca_ops ioat_dca_ops = {
        .add_requester          = ioat_dca_add_requester,
        .remove_requester       = ioat_dca_remove_requester,
        .get_tag                = ioat_dca_get_tag,
        .dev_managed            = ioat_dca_dev_managed,
};


struct dca_provider * __devinit
ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
        struct dca_provider *dca;
        struct ioat_dca_priv *ioatdca;
        u8 *tag_map = NULL;
        int i;
        int err;
        u8 version;
        u8 max_requesters;

        if (!system_has_dca_enabled(pdev))
                return NULL;

        /* I/OAT v1 systems must have a known tag_map to support DCA */
        switch (pdev->vendor) {
        case PCI_VENDOR_ID_INTEL:
                switch (pdev->device) {
                case PCI_DEVICE_ID_INTEL_IOAT:
                        tag_map = ioat_tag_map_BNB;
                        break;
                case PCI_DEVICE_ID_INTEL_IOAT_CNB:
                        tag_map = ioat_tag_map_CNB;
                        break;
                case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
                        tag_map = ioat_tag_map_SCNB;
                        break;
                }
                break;
        case PCI_VENDOR_ID_UNISYS:
                switch (pdev->device) {
                case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
                        tag_map = ioat_tag_map_UNISYS;
                        break;
                }
                break;
        }
        if (tag_map == NULL)
                return NULL;

        version = readb(iobase + IOAT_VER_OFFSET);
        if (version == IOAT_VER_3_0)
                max_requesters = IOAT3_DCA_MAX_REQ;
        else
                max_requesters = IOAT_DCA_MAX_REQ;

        dca = alloc_dca_provider(&ioat_dca_ops,
                        sizeof(*ioatdca) +
                        (sizeof(struct ioat_dca_slot) * max_requesters));
        if (!dca)
                return NULL;

        ioatdca = dca_priv(dca);
        ioatdca->max_requesters = max_requesters;
        ioatdca->dca_base = iobase + 0x54;

        /* copy over the APIC ID to DCA tag mapping */
        for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
                ioatdca->tag_map[i] = tag_map[i];

        err = register_dca_provider(dca, &pdev->dev);
        if (err) {
                free_dca_provider(dca);
                return NULL;
        }

        return dca;
}


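/*
 * Unlike v1, the v2 requester IDs live in a global requester table whose
 * offset is read from IOAT_DCA_GREQID_OFFSET; each 32-bit entry is written
 * as the requester ID or'd with IOAT_DCA_GREQID_VALID.
 */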
static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;
        u16 id;
        u16 global_req_table;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);
        id = dcaid_from_pcidev(pdev);

        if (ioatdca->requester_count == ioatdca->max_requesters)
                return -ENODEV;

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == NULL) {
                        /* found an empty slot */
                        ioatdca->requester_count++;
                        ioatdca->req_slots[i].pdev = pdev;
                        ioatdca->req_slots[i].rid = id;
                        global_req_table =
                              readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
                        writel(id | IOAT_DCA_GREQID_VALID,
                               ioatdca->iobase + global_req_table + (i * 4));
                        return i;
                }
        }
        /* Error, ioatdca->requester_count is out of whack */
        return -EFAULT;
}

static int ioat2_dca_remove_requester(struct dca_provider *dca,
                                      struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;
        u16 global_req_table;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == pdev) {
                        global_req_table =
                              readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
                        writel(0, ioatdca->iobase + global_req_table + (i * 4));
                        ioatdca->req_slots[i].pdev = NULL;
                        ioatdca->req_slots[i].rid = 0;
                        ioatdca->requester_count--;
                        return i;
                }
        }
        return -ENODEV;
}

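/* v2 tags are the one's complement of the v1 computation, kept to 5 bits */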
static u8 ioat2_dca_get_tag(struct dca_provider *dca,
                            struct device *dev,
                            int cpu)
{
        u8 tag;

        tag = ioat_dca_get_tag(dca, dev, cpu);
        tag = (~tag) & 0x1F;
        return tag;
}

static struct dca_ops ioat2_dca_ops = {
        .add_requester          = ioat2_dca_add_requester,
        .remove_requester       = ioat2_dca_remove_requester,
        .get_tag                = ioat2_dca_get_tag,
        .dev_managed            = ioat_dca_dev_managed,
};

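/*
 * Size the global requester table by walking its entries until one is
 * marked IOAT_DCA_GREQID_LASTID; a table offset of zero means no table.
 */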
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
        int slots = 0;
        u32 req;
        u16 global_req_table;

        global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
        if (global_req_table == 0)
                return 0;
        do {
                req = readl(iobase + global_req_table + (slots * sizeof(u32)));
                slots++;
        } while ((req & IOAT_DCA_GREQID_LASTID) == 0);

        return slots;
}

struct dca_provider * __devinit
ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
        struct dca_provider *dca;
        struct ioat_dca_priv *ioatdca;
        int slots;
        int i;
        int err;
        u32 tag_map;
        u16 dca_offset;
        u16 csi_fsb_control;
        u16 pcie_control;
        u8 bit;

        if (!system_has_dca_enabled(pdev))
                return NULL;

        dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
        if (dca_offset == 0)
                return NULL;

        slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
        if (slots == 0)
                return NULL;

        dca = alloc_dca_provider(&ioat2_dca_ops,
                                 sizeof(*ioatdca)
                                      + (sizeof(struct ioat_dca_slot) * slots));
        if (!dca)
                return NULL;

        ioatdca = dca_priv(dca);
        ioatdca->iobase = iobase;
        ioatdca->dca_base = iobase + dca_offset;
        ioatdca->max_requesters = slots;

        /* some BIOSes might not know to turn these on */
        csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
        if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
                csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
                writew(csi_fsb_control,
                       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
        }
        pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
        if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
                pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
                writew(pcie_control,
                       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
        }


        /* TODO version, compatibility and configuration checks */

        /* copy out the APIC to DCA tag map */
        tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
        for (i = 0; i < 5; i++) {
                bit = (tag_map >> (4 * i)) & 0x0f;
                if (bit < 8)
                        ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
                else
                        ioatdca->tag_map[i] = 0;
        }

        if (!dca2_tag_map_valid(ioatdca->tag_map)) {
                dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
                        "disabling DCA\n");
                free_dca_provider(dca);
                return NULL;
        }

        err = register_dca_provider(dca, &pdev->dev);
        if (err) {
                free_dca_provider(dca);
                return NULL;
        }

        return dca;
}

static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;
        u16 id;
        u16 global_req_table;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);
        id = dcaid_from_pcidev(pdev);

        if (ioatdca->requester_count == ioatdca->max_requesters)
                return -ENODEV;

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == NULL) {
                        /* found an empty slot */
                        ioatdca->requester_count++;
                        ioatdca->req_slots[i].pdev = pdev;
                        ioatdca->req_slots[i].rid = id;
                        global_req_table =
                              readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
                        writel(id | IOAT_DCA_GREQID_VALID,
                               ioatdca->iobase + global_req_table + (i * 4));
                        return i;
                }
        }
        /* Error, ioatdca->requester_count is out of whack */
        return -EFAULT;
}

static int ioat3_dca_remove_requester(struct dca_provider *dca,
                                      struct device *dev)
{
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        struct pci_dev *pdev;
        int i;
        u16 global_req_table;

        /* This implementation only supports PCI-Express */
        if (dev->bus != &pci_bus_type)
                return -ENODEV;
        pdev = to_pci_dev(dev);

        for (i = 0; i < ioatdca->max_requesters; i++) {
                if (ioatdca->req_slots[i].pdev == pdev) {
                        global_req_table =
                              readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
                        writel(0, ioatdca->iobase + global_req_table + (i * 4));
                        ioatdca->req_slots[i].pdev = NULL;
                        ioatdca->req_slots[i].rid = 0;
                        ioatdca->requester_count--;
                        return i;
                }
        }
        return -ENODEV;
}

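/*
 * v3 tag map entries come in three flavours: BIT_TO_SEL entries copy the
 * named APIC ID bit, BIT_TO_INV entries copy its inverse, and anything
 * else contributes the literal bit in DCA3_TAG_MAP_LITERAL_VAL.  For
 * example, an entry of 0x42 (SEL | 2) with APIC ID 0x6 yields 1, while
 * 0x82 (INV | 2) yields 0.
 */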
static u8 ioat3_dca_get_tag(struct dca_provider *dca,
                            struct device *dev,
                            int cpu)
{
        u8 tag;

        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        int i, apic_id, bit, value;
        u8 entry;

        tag = 0;
        apic_id = cpu_physical_id(cpu);

        for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
                entry = ioatdca->tag_map[i];
                if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
                        bit = entry &
                                ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
                        value = (apic_id & (1 << bit)) ? 1 : 0;
                } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
                        bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
                        value = (apic_id & (1 << bit)) ? 0 : 1;
                } else {
                        value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
                }
                tag |= (value << i);
        }

        return tag;
}

static struct dca_ops ioat3_dca_ops = {
        .add_requester          = ioat3_dca_add_requester,
        .remove_requester       = ioat3_dca_remove_requester,
        .get_tag                = ioat3_dca_get_tag,
        .dev_managed            = ioat_dca_dev_managed,
};

static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
        int slots = 0;
        u32 req;
        u16 global_req_table;

        global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
        if (global_req_table == 0)
                return 0;

        do {
                req = readl(iobase + global_req_table + (slots * sizeof(u32)));
                slots++;
        } while ((req & IOAT_DCA_GREQID_LASTID) == 0);

        return slots;
}

struct dca_provider * __devinit
ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
        struct dca_provider *dca;
        struct ioat_dca_priv *ioatdca;
        int slots;
        int i;
        int err;
        u16 dca_offset;
        u16 csi_fsb_control;
        u16 pcie_control;
        u8 bit;

        union {
                u64 full;
                struct {
                        u32 low;
                        u32 high;
                };
        } tag_map;

        if (!system_has_dca_enabled(pdev))
                return NULL;

        dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
        if (dca_offset == 0)
                return NULL;

        slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
        if (slots == 0)
                return NULL;

        dca = alloc_dca_provider(&ioat3_dca_ops,
                                 sizeof(*ioatdca)
                                      + (sizeof(struct ioat_dca_slot) * slots));
        if (!dca)
                return NULL;

        ioatdca = dca_priv(dca);
        ioatdca->iobase = iobase;
        ioatdca->dca_base = iobase + dca_offset;
        ioatdca->max_requesters = slots;

        /* some BIOSes might not know to turn these on */
        csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
        if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
                csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
                writew(csi_fsb_control,
                       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
        }
        pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
        if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
                pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
                writew(pcie_control,
                       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
        }


        /* TODO version, compatibility and configuration checks */

        /* copy out the APIC to DCA tag map */
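        /*
         * The 64-bit map is exposed as two 32-bit registers; each byte is
         * masked with DCA_TAG_MAP_MASK and consumed by ioat3_dca_get_tag().
         */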
        tag_map.low =
                readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
        tag_map.high =
                readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
        for (i = 0; i < 8; i++) {
                bit = tag_map.full >> (8 * i);
                ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
        }

        err = register_dca_provider(dca, &pdev->dev);
        if (err) {
                free_dca_provider(dca);
                return NULL;
        }

        return dca;
}
