linux/arch/powerpc/platforms/pasemi/iommu.c
/*
 * Copyright (C) 2005-2008, PA Semi, Inc
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#define IOBMAP_PAGE_SHIFT       12
#define IOBMAP_PAGE_SIZE        (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK        (IOBMAP_PAGE_SIZE - 1)

#define IOB_BASE                0xe0000000
#define IOB_SIZE                0x3000
/* Configuration registers */
#define IOBCAP_REG              0x40
#define IOBCOM_REG              0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN             0x00000100

/* Address decode configuration register */
#define IOB_AD_REG              0x14c
/* IOB_AD_REG fields */
#define IOB_AD_VGPRT            0x00000e00
#define IOB_AD_VGAEN            0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK       0x00000030
#define IOB_AD_MPSEL_B38        0x00000000
#define IOB_AD_MPSEL_B40        0x00000010
#define IOB_AD_MPSEL_B42        0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK        0x00000003
#define IOB_AD_TRNG_256M        0x00000000
#define IOB_AD_TRNG_2G          0x00000001
#define IOB_AD_TRNG_128G        0x00000003

#define IOB_TABLEBASE_REG       0x154

/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE      0x2b00

/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG    0x2d00

/* The top two bits of the level 1 entry contain the valid and type flags */
#define IOBMAP_L1E_V            0x40000000
#define IOBMAP_L1E_V_B          0x80000000

/* For big page entries, the bottom two bits contain flags */
#define IOBMAP_L1E_BIG_CACHED   0x00000002
#define IOBMAP_L1E_BIG_PRIORITY 0x00000001

/* For regular level 2 entries, the top two bits contain the valid and cache flags */
#define IOBMAP_L2E_V            0x80000000
#define IOBMAP_L2E_V_CACHED     0xc0000000

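/*
 * Translation layout: the IOB maps one DMA window (set to 2GB below) in
 * IOBMAP_PAGE_SIZE (4K) pages using a two-level table.  64 L1 registers
 * each point at a 32K block of 8K L2 entries, so each L1 entry covers
 * 32MB of bus address space.  An L2 entry holds the valid/cached flags
 * in its top bits and the real page number of the target in the low bits.
 */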
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;

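/*
 * tce_build callback: write npages L2 entries starting at 'index',
 * mapping consecutive IOBMAP pages of the buffer at 'uaddr', and
 * invalidate the IOB TLB for each bus address written.
 */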
static int iobmap_build(struct iommu_table *tbl, long index,
                         long npages, unsigned long uaddr,
                         enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        u32 *ip;
        u32 rpn;
        unsigned long bus_addr;

        pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

        bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

        ip = ((u32 *)tbl->it_base) + index;

        while (npages--) {
                rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT;

                *(ip++) = IOBMAP_L2E_V | rpn;
                /* invalidate tlb, can be optimized more */
                out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

                uaddr += IOBMAP_PAGE_SIZE;
                bus_addr += IOBMAP_PAGE_SIZE;
        }
        return 0;
}


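/*
 * tce_free callback: point npages L2 entries starting at 'index' back
 * at the dummy page (iob_l2_emptyval) and invalidate the corresponding
 * IOB TLB entries.
 */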
static void iobmap_free(struct iommu_table *tbl, long index,
                        long npages)
{
        u32 *ip;
        unsigned long bus_addr;

        pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

        bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

        ip = ((u32 *)tbl->it_base) + index;

        while (npages--) {
                *(ip++) = iob_l2_emptyval;
                /* invalidate tlb, can be optimized more */
                out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
                bus_addr += IOBMAP_PAGE_SIZE;
        }
}


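/*
 * Fill in the single iommu_table that covers the whole 2GB window and
 * hand it to the generic IOMMU allocator.  Done lazily from the first
 * pci_dma_bus_setup_pasemi() call.
 */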
static void iommu_table_iobmap_setup(void)
{
        pr_debug(" -> %s\n", __func__);
        iommu_table_iobmap.it_busno = 0;
        iommu_table_iobmap.it_offset = 0;
        /* it_size is in number of entries */
        iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT;

        /* Initialize the common IOMMU code */
        iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
        iommu_table_iobmap.it_index = 0;
        /* XXXOJN tune this to avoid IOB cache invals.
         * Should probably be 8 (64 bytes)
         */
        iommu_table_iobmap.it_blocksize = 4;
        iommu_init_table(&iommu_table_iobmap, 0);
        pr_debug(" <- %s\n", __func__);
}


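/* Bus setup hook: make sure the shared iommu_table exists before any
 * device on the bus is set up.
 */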
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
        pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

        if (!iommu_table_iobmap_inited) {
                iommu_table_iobmap_inited = 1;
                iommu_table_iobmap_setup();
        }
}


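/* Per-device setup hook: point every PCI device at the shared IOBMAP
 * table, except for the on-chip DMA engine (vendor 0x1959, device
 * 0xa007), which is left on dma_direct_ops in the non-LPAR case unless
 * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is set.
 */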
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
        pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
        /* For non-LPAR environment, don't translate anything for the DMA
         * engine. The exception to this is if the user has enabled
         * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
         */
        if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
            !firmware_has_feature(FW_FEATURE_LPAR)) {
                dev->dev.archdata.dma_ops = &dma_direct_ops;
                return;
        }
#endif

        set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}

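/*
 * One-time hardware init: allocate a dummy page for invalid entries,
 * ioremap the IOB registers, point the 64 L1 registers at the
 * preallocated L2 table, select the 2GB translation window and enable
 * address translation.
 */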
int __init iob_init(struct device_node *dn)
{
        unsigned long tmp;
        u32 regword;
        int i;

        pr_debug(" -> %s\n", __func__);

        /* Allocate a spare page to map all invalid IOTLB pages. */
        tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
        if (!tmp)
                panic("IOBMAP: Cannot allocate spare page!");
        /* Empty l1 is marked invalid */
        iob_l1_emptyval = 0;
        /* Empty l2 is mapped to dummy page */
        iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

        iob = ioremap(IOB_BASE, IOB_SIZE);
        if (!iob)
                panic("IOBMAP: Cannot map registers!");

        /* setup direct mapping of the L1 entries */
        for (i = 0; i < 64; i++) {
                /* Each L1 entry covers 32MB of bus space, i.e. 8K L2 entries = 32K of RAM */
                regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
                out_le32(iob+IOB_XLT_L1_REGBASE+i*4, regword);
        }

        /* set 2GB translation window, based at 0 */
        regword = in_le32(iob+IOB_AD_REG);
        regword &= ~IOB_AD_TRNG_MASK;
        regword |= IOB_AD_TRNG_2G;
        out_le32(iob+IOB_AD_REG, regword);

        /* Enable translation */
        regword = in_le32(iob+IOBCOM_REG);
        regword |= IOBCOM_ATEN;
        out_le32(iob+IOBCOM_REG, regword);

        pr_debug(" <- %s\n", __func__);

        return 0;
}


/* These are called very early.  Unless the IOMMU has been disabled in
 * Kconfig or via the "linux,iommu-off" property under /chosen, set up
 * the IOB and hook the PCI DMA setup and TCE callbacks.
 */
void __init iommu_init_early_pasemi(void)
{
        int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
        iommu_off = 1;
#else
        iommu_off = of_chosen &&
                        of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
        if (iommu_off)
                return;

        iob_init(NULL);

        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
        ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
        ppc_md.tce_build = iobmap_build;
        ppc_md.tce_free  = iobmap_free;
        set_pci_dma_ops(&dma_iommu_ops);
}

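/*
 * Allocate the L2 table early from memblock: 2MB, 2MB-aligned, placed
 * below the 2GB DMA window.
 */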
void __init alloc_iobmap_l2(void)
{
#ifndef CONFIG_PPC_PASEMI_IOMMU
        return;
#endif
        /* For the 2G window, 64 L1 entries x 8 pages each (2^21 bytes) is
         * the maximum total L2 table size */
        iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));

        printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
}