linux/arch/powerpc/platforms/pasemi/iommu.c
/*
 * Copyright (C) 2005-2008, PA Semi, Inc
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>

#include "pasemi.h"

#define IOBMAP_PAGE_SHIFT       12
#define IOBMAP_PAGE_SIZE        (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK        (IOBMAP_PAGE_SIZE - 1)

#define IOB_BASE                0xe0000000
#define IOB_SIZE                0x3000
/* Configuration registers */
#define IOBCAP_REG              0x40
#define IOBCOM_REG              0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN             0x00000100

/* Address decode configuration register */
#define IOB_AD_REG              0x14c
/* IOB_AD_REG fields */
#define IOB_AD_VGPRT            0x00000e00
#define IOB_AD_VGAEN            0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK       0x00000030
#define IOB_AD_MPSEL_B38        0x00000000
#define IOB_AD_MPSEL_B40        0x00000010
#define IOB_AD_MPSEL_B42        0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK        0x00000003
#define IOB_AD_TRNG_256M        0x00000000
#define IOB_AD_TRNG_2G          0x00000001
#define IOB_AD_TRNG_128G        0x00000003

#define IOB_TABLEBASE_REG       0x154

/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE      0x2b00

/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG    0x2d00

/* The top two bits of the level 1 entry contain valid and type flags */
#define IOBMAP_L1E_V            0x40000000
#define IOBMAP_L1E_V_B          0x80000000

/* For big page entries, the bottom two bits contain flags */
#define IOBMAP_L1E_BIG_CACHED   0x00000002
#define IOBMAP_L1E_BIG_PRIORITY 0x00000001

/* For regular level 2 entries, top 2 bits contain valid and cache flags */
#define IOBMAP_L2E_V            0x80000000
#define IOBMAP_L2E_V_CACHED     0xc0000000

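/*
 * Translation layout, as implied by the code below: the IOB exposes a 2GB
 * DMA window split across 64 L1 entries of 32MB each.  Every L1 entry
 * points at a block of 8K 4-byte L2 entries, and each L2 entry maps one
 * 4KB IOMMU page (physical page number in the low bits, valid/cache flags
 * in the top bits).
 */
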
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;

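/*
 * iommu_table_ops "set" hook: fill in the L2 entries for a run of IOMMU
 * pages and invalidate the corresponding IOTLB entries.
 */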
static int iobmap_build(struct iommu_table *tbl, long index,
                         long npages, unsigned long uaddr,
                         enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        u32 *ip;
        u32 rpn;
        unsigned long bus_addr;

        pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

        bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

        ip = ((u32 *)tbl->it_base) + index;

        while (npages--) {
                rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;

                *(ip++) = IOBMAP_L2E_V | rpn;
                /* invalidate tlb, can be optimized more */
                out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

                uaddr += IOBMAP_PAGE_SIZE;
                bus_addr += IOBMAP_PAGE_SIZE;
        }
        return 0;
}


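/*
 * iommu_table_ops "clear" hook: point the freed L2 entries back at the
 * spare dummy page and invalidate the IOTLB, so any stray DMA hits the
 * spare page instead of the old mapping.
 */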
static void iobmap_free(struct iommu_table *tbl, long index,
                        long npages)
{
        u32 *ip;
        unsigned long bus_addr;

        pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

        bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

        ip = ((u32 *)tbl->it_base) + index;

        while (npages--) {
                *(ip++) = iob_l2_emptyval;
                /* invalidate tlb, can be optimized more */
                out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
                bus_addr += IOBMAP_PAGE_SIZE;
        }
}

static struct iommu_table_ops iommu_table_iobmap_ops = {
        .set = iobmap_build,
        .clear  = iobmap_free
};

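/*
 * Set up the single global iommu_table covering the 2GB translated window:
 * 0x80000000 bytes of bus space / 4KB per page = 512K entries, backed by
 * the L2 table allocated in alloc_iobmap_l2().
 */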
static void iommu_table_iobmap_setup(void)
{
        pr_debug(" -> %s\n", __func__);
        iommu_table_iobmap.it_busno = 0;
        iommu_table_iobmap.it_offset = 0;
        iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;

        /* it_size is in number of entries */
        iommu_table_iobmap.it_size =
                0x80000000 >> iommu_table_iobmap.it_page_shift;

        /* Initialize the common IOMMU code */
        iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
        iommu_table_iobmap.it_index = 0;
        /* XXXOJN tune this to avoid IOB cache invals.
         * Should probably be 8 (64 bytes)
         */
        iommu_table_iobmap.it_blocksize = 4;
        iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
        iommu_init_table(&iommu_table_iobmap, 0);
        pr_debug(" <- %s\n", __func__);
}



static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
        pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

        if (!iommu_table_iobmap_inited) {
                iommu_table_iobmap_inited = 1;
                iommu_table_iobmap_setup();
        }
}


static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
        pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
        /* For non-LPAR environment, don't translate anything for the DMA
         * engine. The exception to this is if the user has enabled
         * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
         */
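        /* PCI ID 0x1959:0xa007 is the PA Semi on-chip DMA engine */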
        if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
            !firmware_has_feature(FW_FEATURE_LPAR)) {
                dev->dev.archdata.dma_ops = &dma_direct_ops;
                return;
        }
#endif

        set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}

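/*
 * Bring up the IOB address translation hardware: allocate the dummy page
 * used for unmapped entries, map the IOB registers, point the 64 L1
 * entries at the preallocated L2 table, select a 2GB window based at 0,
 * and finally enable translation.
 */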
int __init iob_init(struct device_node *dn)
{
        unsigned long tmp;
        u32 regword;
        int i;

        pr_debug(" -> %s\n", __func__);

        /* Allocate a spare page to map all invalid IOTLB pages. */
        tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
        if (!tmp)
                panic("IOBMAP: Cannot allocate spare page!");
        /* Empty l1 is marked invalid */
        iob_l1_emptyval = 0;
        /* Empty l2 is mapped to dummy page */
        iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

        iob = ioremap(IOB_BASE, IOB_SIZE);
        if (!iob)
                panic("IOBMAP: Cannot map registers!");

        /* setup direct mapping of the L1 entries */
        for (i = 0; i < 64; i++) {
                /* Each L1 entry covers 32MB, i.e. 8K L2 entries = 32KB of RAM */
                regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
                out_le32(iob+IOB_XLT_L1_REGBASE+i*4, regword);
        }

        /* set 2GB translation window, based at 0 */
        regword = in_le32(iob+IOB_AD_REG);
        regword &= ~IOB_AD_TRNG_MASK;
        regword |= IOB_AD_TRNG_2G;
        out_le32(iob+IOB_AD_REG, regword);

        /* Enable translation */
        regword = in_le32(iob+IOBCOM_REG);
        regword |= IOBCOM_ATEN;
        out_le32(iob+IOBCOM_REG, regword);

        pr_debug(" <- %s\n", __func__);

        return 0;
}


/* These are called very early. */
void __init iommu_init_early_pasemi(void)
{
        int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
        iommu_off = 1;
#else
        iommu_off = of_chosen &&
                        of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
        if (iommu_off)
                return;

        iob_init(NULL);

        pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
        pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
        set_pci_dma_ops(&dma_iommu_ops);
}

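/*
 * Reserve the L2 table itself: 2GB window / 4KB pages = 512K entries at
 * 4 bytes each, i.e. 2MB (2^21 bytes), allocated contiguously below 2GB
 * from memblock since this runs before the page allocator is available.
 */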
void __init alloc_iobmap_l2(void)
{
#ifndef CONFIG_PPC_PASEMI_IOMMU
        return;
#endif
        /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
        iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));

        printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
}