linux/arch/ia64/sn/pci/pcibr/pcibr_dma.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tiocp.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"

extern int sn_ioif_inited;

/* =====================================================================
 *    DMA MANAGEMENT
 *
 *      The Bridge ASIC provides three methods of doing DMA: via a "direct map"
 *      register available in 32-bit PCI space (which selects a contiguous 2G
 *      address space on some other widget), via "direct" addressing in 64-bit
 *      PCI space (all destination information comes from the PCI address,
 *      including transfer attributes), and via a "mapped" region that allows
 *      a number of different small mappings to be established with the PMU.
 *
 *      For efficiency, we prefer the 32-bit direct mapping facility, since it
 *      requires no resource allocations. The advantage of using the PMU over
 *      64-bit direct is that single-cycle PCI addressing can be used; the
 *      advantage of 64-bit direct over PMU addressing is that we do not have
 *      to allocate entries in the PMU.
 */
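
/*
 * For reference, the three styles produce PCI-side addresses of roughly
 * these forms (illustrative summary; the routines below are authoritative):
 *
 *      32-bit direct:  PCI32_DIRECT_BASE | (xio_addr - pbi_dir_xbase)
 *      32-bit mapped:  PCI32_MAPPED_BASE + IOPGSIZE * ate_index + offset
 *      64-bit direct:  xio_addr | dma_attributes [| target id, on PIC]
 */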

static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
                   u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
        struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
        struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
            pdi_pcibus_info;
        u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
                                            pdi_linux_pcidev->devfn)) - 1;
        int ate_count;
        int ate_index;
        u64 ate_flags = flags | PCI32_ATE_V;
        u64 ate;
        u64 pci_addr;
        u64 xio_addr;
        u64 offset;

        /* PIC in PCI-X mode does not support 32-bit PageMap mode */
        if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
                return 0;
        }

        /* Calculate the number of ATEs needed. */
        if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
                ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
                                 + req_size     /* max mapping bytes */
                                 - 1) + 1;      /* round UP */
        } else {                /* assume requested target is page aligned */
                ate_count = IOPG(req_size       /* max mapping bytes */
                                 - 1) + 1;      /* round UP */
        }
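        /*
         * Worked example (illustrative; assumes IOPGSIZE == 4096): for an
         * unaligned request of req_size = 5000 bytes, the worst-case start
         * offset is IOPGSIZE - 1 = 4095, so the mapping may touch
         * IOPG(4095 + 5000 - 1) + 1 = IOPG(9094) + 1 = 2 + 1 = 3 I/O pages,
         * and therefore needs 3 ATEs.
         */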

        /* Allocate the ATEs. */
        ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
        if (ate_index < 0)
                return 0;

        /* In PCI-X mode, prefetch is not supported */
        if (IS_PCIX(pcibus_info))
                ate_flags &= ~(PCI32_ATE_PREF);

        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
                xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
                                                      PHYS_TO_TIODMA(paddr);
        else
                xio_addr = paddr;

        offset = IOPGOFF(xio_addr);
        ate = ate_flags | (xio_addr - offset);

        /* If PIC, put the targetid in the ATE */
        if (IS_PIC_SOFT(pcibus_info)) {
                ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
        }

        /*
         * If we're mapping for MSI, set the MSI bit in the ATE.  If it's a
         * TIOCP based pci bus, we also need to set the PIO bit in the ATE.
         */
        if (dma_flags & SN_DMA_MSI) {
                ate |= PCI32_ATE_MSI;
                if (IS_TIOCP_SOFT(pcibus_info))
                        ate |= PCI32_ATE_PIO;
        }

        ate_write(pcibus_info, ate_index, ate_count, ate);

        /*
         * Set up the DMA mapped Address.
         */
        pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
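
        /*
         * Illustrative example (again assuming IOPGSIZE == 4096): with
         * ate_index = 3 and IOPGOFF(xio_addr) = 0x123, the handle returned
         * to the driver is PCI32_MAPPED_BASE + 0x123 + 4096 * 3, i.e.
         * PCI32_MAPPED_BASE + 0x3123.
         */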

        /*
         * If swap was set in device in pcibr_endian_set()
         * we need to turn swapping on.
         */
        if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
                ATE_SWAP_ON(pci_addr);

        return pci_addr;
}

static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info *info, u64 paddr,
                        u64 dma_attributes, int dma_flags)
{
        struct pcibus_info *pcibus_info = (struct pcibus_info *)
            ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
        u64 pci_addr;

        /* Translate to Crosstalk View of Physical Address */
        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
                pci_addr = IS_PIC_SOFT(pcibus_info) ?
                                PHYS_TO_DMA(paddr) :
                                PHYS_TO_TIODMA(paddr);
        else
                pci_addr = paddr;
        pci_addr |= dma_attributes;

        /* Handle Bus mode */
        if (IS_PCIX(pcibus_info))
                pci_addr &= ~PCI64_ATTR_PREF;

        /* Handle Bridge Chipset differences */
        if (IS_PIC_SOFT(pcibus_info)) {
                pci_addr |=
                    ((u64) pcibus_info->
                     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
        } else
                pci_addr |= (dma_flags & SN_DMA_MSI) ?
                                TIOCP_PCI64_CMDTYPE_MSI :
                                TIOCP_PCI64_CMDTYPE_MEM;

        /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
        if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
                pci_addr |= PCI64_ATTR_VIRTUAL;

        return pci_addr;
}
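
/*
 * Illustrative composition of the 64-bit direct address (bit positions come
 * from the PCI64_ATTR_* and PIC_PCI64_ATTR_TARG_SHFT definitions, not
 * spelled out here):
 *
 *      pci_addr = xio_addr             // Crosstalk view of the memory
 *               | dma_attributes       // e.g. PCI64_ATTR_PREF or _BAR
 *               | target id            // PIC only; TIOCP uses CMDTYPE bits
 *               [| PCI64_ATTR_VIRTUAL] // VCHAN1 for non-zero functions
 *
 * Everything needed to route the transaction rides in the address itself,
 * which is why no PMU (ATE) entry is consumed.
 */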

static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info *info,
                        u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
        struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
        struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
            pdi_pcibus_info;
        u64 xio_addr;
        u64 xio_base;
        u64 offset;
        u64 endoff;

        if (IS_PCIX(pcibus_info)) {
                return 0;
        }

        if (dma_flags & SN_DMA_MSI)
                return 0;

        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
                xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
                                                      PHYS_TO_TIODMA(paddr);
        else
                xio_addr = paddr;

        xio_base = pcibus_info->pbi_dir_xbase;
        offset = xio_addr - xio_base;
        endoff = req_size + offset;
        if ((req_size > (1ULL << 31)) ||        /* Too Big */
            (xio_addr < xio_base) ||    /* Out of range for mappings */
            (endoff > (1ULL << 31))) {  /* Too Big */
                return 0;
        }

        return PCI32_DIRECT_BASE | offset;
}
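
/*
 * Worked example for the range check above (illustrative numbers): with
 * pbi_dir_xbase = 0x100000000 and a request at xio_addr = 0x100400000 of
 * req_size = 0x1000, offset = 0x400000 and endoff = 0x401000, both well
 * under the 2G (1ULL << 31) window, so the request maps to
 * PCI32_DIRECT_BASE | 0x400000.  A request starting below pbi_dir_xbase or
 * ending past the 2G window falls back to ATE mapping; see pcibr_dma_map().
 */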

/*
 * Wrapper routine for freeing DMA maps.
 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
 */
void
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
        struct pcibus_info *pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_pcibus_info;

        if (IS_PCI32_MAPPED(dma_handle)) {
                int ate_index;

                ate_index =
                    IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
                pcibr_ate_free(pcibus_info, ate_index);
        }
}
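
/*
 * Recovering ate_index works because pcibr_dmamap_ate32() built the handle
 * as PCI32_MAPPED_BASE + IOPGSIZE * ate_index + offset, with
 * offset < IOPGSIZE; IOPG() of the difference discards the offset and
 * yields the first ATE of the block (illustrative reasoning; ATE_SWAP_OFF()
 * first strips the byte-swap indication, if any).
 */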

/*
 * On SN systems there is a race condition between a PIO read response and
 * DMAs.  In rare cases, the read response may beat the DMA, causing the
 * driver to think that data in memory is complete and meaningful.  This code
 * eliminates that race.  This routine is called by the PIO read routines
 * after doing the read.  For PIC this routine then forces a fake interrupt
 * on another line, which is logically associated with the slot that the PIO
 * is addressed to.  It then spins while watching the memory location that
 * the interrupt is targeted to.  When the interrupt response arrives, we
 * are sure that the DMA has landed in memory and it is safe for the driver
 * to proceed.  For TIOCP use the Device(x) Write Request Buffer Flush
 * Bridge register, since it ensures the data has entered the coherence
 * domain, unlike the PIC Device(x) Write Request Buffer Flush register.
 */
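
/*
 * Illustrative (hypothetical) use from a driver's PIO read path, assuming
 * the device reports completion status via a PIO-readable register:
 *
 *      status = readl(mmio_status_reg);
 *      sn_dma_flush((u64) mmio_status_reg);
 *      // only now is it safe to inspect the DMA buffers that
 *      // 'status' claims are complete
 */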

void sn_dma_flush(u64 addr)
{
        nasid_t nasid;
        int is_tio;
        int wid_num;
        int i, j;
        unsigned long flags;
        u64 itte;
        struct hubdev_info *hubinfo;
        struct sn_flush_device_kernel *p;
        struct sn_flush_device_common *common;
        struct sn_flush_nasid_entry *flush_nasid_list;

        if (!sn_ioif_inited)
                return;

        nasid = NASID_GET(addr);
        if (-1 == nasid_to_cnodeid(nasid))
                return;

        hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

        BUG_ON(!hubinfo);

        flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
        if (flush_nasid_list->widget_p == NULL)
                return;

        is_tio = (nasid & 1);
        if (is_tio) {
                int itte_index;

                if (TIO_HWIN(addr))
                        itte_index = 0;
                else if (TIO_BWIN_WINDOWNUM(addr))
                        itte_index = TIO_BWIN_WINDOWNUM(addr);
                else
                        itte_index = -1;

                if (itte_index >= 0) {
                        itte = flush_nasid_list->iio_itte[itte_index];
                        if (!TIO_ITTE_VALID(itte))
                                return;
                        wid_num = TIO_ITTE_WIDGET(itte);
                } else
                        wid_num = TIO_SWIN_WIDGETNUM(addr);
        } else {
                if (BWIN_WINDOWNUM(addr)) {
                        itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
                        wid_num = IIO_ITTE_WIDGET(itte);
                } else
                        wid_num = SWIN_WIDGETNUM(addr);
        }
        if (flush_nasid_list->widget_p[wid_num] == NULL)
                return;
        p = &flush_nasid_list->widget_p[wid_num][0];

        /* find a matching BAR */
        for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
                common = p->common;
                for (j = 0; j < PCI_ROM_RESOURCE; j++) {
                        if (common->sfdl_bar_list[j].start == 0)
                                break;
                        if (addr >= common->sfdl_bar_list[j].start
                            && addr <= common->sfdl_bar_list[j].end)
                                break;
                }
                if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
                        break;
        }

        /* if no matching BAR, return without doing anything. */
        if (i == DEV_PER_WIDGET)
                return;

        /*
         * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
         * register since it ensures the data has entered the coherence
         * domain, unlike PIC.
         */
        if (is_tio) {
                /*
                 * Note:  devices behind TIOCE should never be matched in the
                 * above code, and so the following code is PIC/CP centric.
                 * If CE ever needs the sn_dma_flush mechanism, we will have
                 * to account for that here and in tioce_bus_fixup().
                 */
                u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
                u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

                /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
                if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
                        return;
                } else {
                        pcireg_wrb_flush_get(common->sfdl_pcibus_info,
                                             (common->sfdl_slot - 1));
                }
        } else {
                spin_lock_irqsave(&p->sfdl_flush_lock, flags);
                *common->sfdl_flush_addr = 0;

                /* force an interrupt. */
                *(volatile u32 *)(common->sfdl_force_int_addr) = 1;

                /* wait for the interrupt to come back. */
                while (*(common->sfdl_flush_addr) != 0x10f)
                        cpu_relax();

                /* okay, everything is synched up. */
                spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
        }
        return;
}

/*
 * DMA interfaces.  Called from pci_dma.c routines.
 */

dma_addr_t
pcibr_dma_map(struct pci_dev *hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
        dma_addr_t dma_handle;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);

        /* SN cannot support DMA addresses smaller than 32 bits. */
        if (hwdev->dma_mask < 0x7fffffff) {
                return 0;
        }

        if (hwdev->dma_mask == ~0UL) {
                /*
                 * Handle the most common case: 64 bit cards.  This
                 * call should always succeed.
                 */

                dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
                                                     PCI64_ATTR_PREF, dma_flags);
        } else {
                /* Handle 32-63 bit cards via direct mapping */
                dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
                                                     size, 0, dma_flags);
                if (!dma_handle) {
                        /*
                         * It is a 32 bit card and we cannot do direct mapping,
                         * so we use an ATE.
                         */

                        dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
                                                        size, PCI32_ATE_PREF,
                                                        dma_flags);
                }
        }

        return dma_handle;
}
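
/*
 * Hypothetical caller sketch (illustrative only; the real entry points live
 * in pci_dma.c): a streaming map of a kernel buffer would look roughly like
 *
 *      dma_addr_t handle;
 *
 *      handle = pcibr_dma_map(pdev, __pa(cpu_addr), len, SN_DMA_ADDR_PHYS);
 *      if (!handle)
 *              // fail -- the bus could not map the buffer
 *
 * with SN_DMA_ADDR_PHYS indicating that phys_addr is a system physical
 * address rather than an XIO address.
 */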

dma_addr_t
pcibr_dma_map_consistent(struct pci_dev *hwdev, unsigned long phys_addr,
                         size_t size, int dma_flags)
{
        dma_addr_t dma_handle;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);

        if (hwdev->dev.coherent_dma_mask == ~0UL) {
                dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
                                            PCI64_ATTR_BAR, dma_flags);
        } else {
                dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
                                                    phys_addr, size,
                                                    PCI32_ATE_BAR, dma_flags);
        }

        return dma_handle;
}

EXPORT_SYMBOL(sn_dma_flush);