/*
 * arch/x86/pci/sta2x11-fixup.c
 * glue code for lib/swiotlb.c and DMA translation between STA2x11
 * AMBA memory mapping and the X86 memory mapping
 *
 * ST Microelectronics ConneXt (STA2X11/STA2X10)
 *
 * Copyright (c) 2010-2011 Wind River Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>         /* kzalloc() */

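/*
 * A 4 MB swiotlb bounce-buffer pool is registered late, when the first
 * ConneXt bridge is discovered (see sta2x11_new_instance() below).
 */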
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
extern int swiotlb_late_init_with_default_size(size_t default_size);

/*
 * We build a list of bus numbers that are under the ConneXt. The
 * main bridge hosts 4 busses, which are the 4 endpoints, in order.
 */
#define STA2X11_NR_EP           4       /* 0..3 included */
#define STA2X11_NR_FUNCS        8       /* 0..7 included */
#define STA2X11_AMBA_SIZE       (512 << 20)

struct sta2x11_ahb_regs { /* saved during suspend */
        u32 base, pexlbase, pexhbase, crw;
};

struct sta2x11_mapping {
        u32 amba_base;
        int is_suspended;
        struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};

struct sta2x11_instance {
        struct list_head list;
        int bus0;
        struct sta2x11_mapping map[STA2X11_NR_EP];
};

static LIST_HEAD(sta2x11_instance_list);

/* At probe time, record new instances of this bridge (likely one only) */
static void sta2x11_new_instance(struct pci_dev *pdev)
{
        struct sta2x11_instance *instance;

        instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
        if (!instance)
                return;
        /* This has a subordinate bridge, with 4 more-subordinate ones */
        instance->bus0 = pdev->subordinate->number + 1;

        if (list_empty(&sta2x11_instance_list)) {
                int size = STA2X11_SWIOTLB_SIZE;
                /* First instance: register your own swiotlb area */
                dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
                if (swiotlb_late_init_with_default_size(size))
                        dev_emerg(&pdev->dev, "init swiotlb failed\n");
        }
        list_add(&instance->list, &sta2x11_instance_list);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);

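/*
 * Endpoint lookup: the 0xcc17 bridge's subordinate bus number plus one is
 * the first endpoint bus (bus0), and the four endpoints sit on consecutive
 * bus numbers.  As a purely hypothetical example, with bus0 == 2 a device
 * sitting on bus 4 belongs to endpoint 2 of that instance.
 */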
/*
 * Utility functions used in this file from below
 */
static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
{
        struct sta2x11_instance *instance;
        int ep;

        list_for_each_entry(instance, &sta2x11_instance_list, list) {
                ep = pdev->bus->number - instance->bus0;
                if (ep >= 0 && ep < STA2X11_NR_EP)
                        return instance;
        }
        return NULL;
}

static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
{
        struct sta2x11_instance *instance;

        instance = sta2x11_pdev_to_instance(pdev);
        if (!instance)
                return -1;

        return pdev->bus->number - instance->bus0;
}

static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
{
        struct sta2x11_instance *instance;
        int ep;

        instance = sta2x11_pdev_to_instance(pdev);
        if (!instance)
                return NULL;
        ep = sta2x11_pdev_to_ep(pdev);
        return instance->map + ep;
}

/* This is exported, as some devices need to access the MFD registers */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
        return sta2x11_pdev_to_instance(pdev);
}
EXPORT_SYMBOL(sta2x11_get_instance);


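/*
 * DMA addresses seen by a ConneXt endpoint are AMBA addresses: the physical
 * address plus the per-endpoint amba_base read back from the AHB window
 * registers.  As a purely illustrative example, if amba_base were
 * 0x40000000, physical address 0x00001000 would be handed to the device as
 * 0x40001000; the reverse translation subtracts the same offset.
 */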
/**
 * p2a - Translate physical address to STA2x11 AMBA address,
 *       used for DMA transfers to STA2x11
 * @p: Physical address
 * @pdev: PCI device (must be hosted within the ConneXt)
 */
static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
{
        struct sta2x11_mapping *map;
        dma_addr_t a;

        map = sta2x11_pdev_to_mapping(pdev);
        a = p + map->amba_base;
        return a;
}

/**
 * a2p - Translate STA2x11 AMBA address to physical address
 *       used for DMA transfers from STA2x11
 * @a: STA2x11 AMBA address
 * @pdev: PCI device (must be hosted within the ConneXt)
 */
static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
{
        struct sta2x11_mapping *map;
        dma_addr_t p;

        map = sta2x11_pdev_to_mapping(pdev);
        p = a - map->amba_base;
        return p;
}

/**
 * sta2x11_swiotlb_alloc_coherent - Allocate swiotlb bounce buffers
 *     returns virtual address. This is the only "special" function here.
 * @dev: PCI device
 * @size: Size of the buffer
 * @dma_handle: DMA address
 * @flags: memory flags
 * @attrs: DMA attributes
 */
static void *sta2x11_swiotlb_alloc_coherent(struct device *dev,
                                            size_t size,
                                            dma_addr_t *dma_handle,
                                            gfp_t flags,
                                            struct dma_attrs *attrs)
{
        void *vaddr;

        vaddr = dma_generic_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!vaddr)
                vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        *dma_handle = p2a(*dma_handle, to_pci_dev(dev));
        return vaddr;
}

/* We have our own dma_ops: the same as swiotlb but from alloc (above) */
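/*
 * Only .alloc is wrapped; the stock swiotlb map/unmap/sync helpers below
 * pick up the AMBA offset through the phys_to_dma()/dma_to_phys()/
 * dma_capable() overrides exported further down in this file.
 */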
static struct dma_map_ops sta2x11_dma_ops = {
        .alloc = sta2x11_swiotlb_alloc_coherent,
        .free = swiotlb_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .mapping_error = swiotlb_dma_mapping_error,
        .dma_supported = NULL, /* FIXME: we should use this instead! */
};

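/*
 * Both DMA masks are set to STA2X11_AMBA_SIZE - 1 (0x1fffffff) below:
 * a ConneXt endpoint can only reach the 512 MB AMBA aperture, so buffers
 * that translate outside it are bounced through the swiotlb pool.
 */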
/* At setup time, we use our own ops if the device is a ConneXt one */
static void sta2x11_setup_pdev(struct pci_dev *pdev)
{
        struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);

        if (!instance) /* either a sta2x11 bridge or another ST device */
                return;
        pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
        pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
        pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;

        /* We must enable all devices as master, for audio DMA to work */
        pci_set_master(pdev);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);

/*
 * The following three functions are exported (used in swiotlb: FIXME)
 */
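/*
 * For a device that does not use sta2x11_dma_ops these behave like the
 * generic x86 versions (identity mapping, plain mask check).  For ConneXt
 * devices, phys_to_dma()/dma_to_phys() apply the amba_base offset and
 * dma_capable() accepts only addresses inside the 512 MB AMBA window.
 */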
/**
 * dma_capable - Check if device can manage DMA transfers (FIXME: kill it)
 * @dev: device for a PCI device
 * @addr: DMA address
 * @size: DMA size
 */
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        struct sta2x11_mapping *map;

        if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
                if (!dev->dma_mask)
                        return false;
                return addr + size - 1 <= *dev->dma_mask;
        }

        map = sta2x11_pdev_to_mapping(to_pci_dev(dev));

        if (!map || (addr < map->amba_base))
                return false;
        if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
                return false;
        }

        return true;
}

/**
 * phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
 * @dev: device for a PCI device
 * @paddr: Physical address
 */
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        if (dev->archdata.dma_ops != &sta2x11_dma_ops)
                return paddr;
        return p2a(paddr, to_pci_dev(dev));
}

/**
 * dma_to_phys - Return the physical address used for this STA2x11 DMA address
 * @dev: device for a PCI device
 * @daddr: STA2x11 AMBA DMA address
 */
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        if (dev->archdata.dma_ops != &sta2x11_dma_ops)
                return daddr;
        return a2p(daddr, to_pci_dev(dev));
}


/*
 * At boot we must set up the mappings for the pcie-to-amba bridge.
 * It involves device access, and the same happens at suspend/resume time
 */

#define AHB_MAPB                0xCA4
#define AHB_CRW(i)              (AHB_MAPB + 0  + (i) * 0x10)
#define AHB_CRW_SZMASK                  0xfffffc00UL
#define AHB_CRW_ENABLE                  (1 << 0)
#define AHB_CRW_WTYPE_MEM               (2 << 1)
#define AHB_CRW_ROE                     (1UL << 3)      /* Relax Order Ena */
#define AHB_CRW_NSE                     (1UL << 4)      /* No Snoop Enable */
#define AHB_BASE(i)             (AHB_MAPB + 4  + (i) * 0x10)
#define AHB_PEXLBASE(i)         (AHB_MAPB + 8  + (i) * 0x10)
#define AHB_PEXHBASE(i)         (AHB_MAPB + 12 + (i) * 0x10)
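/*
 * Each of the 8 windows occupies a 0x10-byte slot in config space starting
 * at AHB_MAPB: CRW at +0, BASE at +4, PEXLBASE at +8 and PEXHBASE at +12.
 */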

/* At probe time, enable mapping for each endpoint, using the pdev */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
        struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
        int i;

        if (!map)
                return;
        pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);

        /* Configure AHB mapping */
        pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
        pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
        pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
                               AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);

        /* Disable all the other windows */
        for (i = 1; i < STA2X11_NR_FUNCS; i++)
                pci_write_config_dword(pdev, AHB_CRW(i), 0);

        dev_info(&pdev->dev,
                 "sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
                 sta2x11_pdev_to_ep(pdev),  map->amba_base,
                 map->amba_base + STA2X11_AMBA_SIZE - 1);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);

#ifdef CONFIG_PM /* Some register values must be saved and restored */

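/*
 * The AHB window registers are vendor-specific config space that the PCI
 * core does not save and restore, so the suspend/resume fixups below do it
 * by hand; the is_suspended flag keeps the snapshot to once per mapping.
 */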
static void suspend_mapping(struct pci_dev *pdev)
{
        struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
        int i;

        if (!map)
                return;

        if (map->is_suspended)
                return;
        map->is_suspended = 1;

        /* Save all window configs */
        for (i = 0; i < STA2X11_NR_FUNCS; i++) {
                struct sta2x11_ahb_regs *regs = map->regs + i;

                pci_read_config_dword(pdev, AHB_BASE(i), &regs->base);
                pci_read_config_dword(pdev, AHB_PEXLBASE(i), &regs->pexlbase);
                pci_read_config_dword(pdev, AHB_PEXHBASE(i), &regs->pexhbase);
                pci_read_config_dword(pdev, AHB_CRW(i), &regs->crw);
        }
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);

static void resume_mapping(struct pci_dev *pdev)
{
        struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
        int i;

        if (!map)
                return;

        if (!map->is_suspended)
                goto out;
        map->is_suspended = 0;

        /* Restore all window configs */
        for (i = 0; i < STA2X11_NR_FUNCS; i++) {
                struct sta2x11_ahb_regs *regs = map->regs + i;

                pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
                pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
                pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
                pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
        }
out:
        pci_set_master(pdev); /* Like at boot, enable master on all devices */
}
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);

#endif /* CONFIG_PM */