/* linux/arch/m68k/include/asm/dma.h */
   1#ifndef _M68K_DMA_H
   2#define _M68K_DMA_H 1
   3
   4#ifdef CONFIG_COLDFIRE
   5/*
   6 * ColdFire DMA Model:
   7 *   ColdFire DMA supports two forms of DMA: Single and Dual address. Single
   8 * address mode emits a source address, and expects that the device will either
   9 * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
  10 * the device will place data on the correct byte(s) of the data bus, as the
  11 * memory transactions are always 32 bits. This implies that only 32 bit
  12 * devices will find single mode transfers useful. Dual address DMA mode
  13 * performs two cycles: source read and destination write. ColdFire will
  14 * align the data so that the device will always get the correct bytes, thus
  15 * is useful for 8 and 16 bit devices. This is the mode that is supported
  16 * below.
  17 *
  18 * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  19 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  20 *
  21 * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  22 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  23 *
  24 * APR/18/2002 : added proper support for MCF5272 DMA controller.
  25 *               Arthur Shipkowski (art@videon-central.com)
  26 */
  27
  28#include <asm/coldfire.h>
  29#include <asm/mcfsim.h>
  30#include <asm/mcfdma.h>
  31
/*
 * Set number of channels of DMA on ColdFire for different implementations.
 */
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
        defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
        defined(CONFIG_M528x) || defined(CONFIG_M525x)

#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#elif defined(CONFIG_M53xx)
#define MAX_M68K_DMA_CHANNELS 0
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif

/* Per-channel base address of the DMA controller registers (defined elsewhere) */
extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
/* Per-channel device-side address recorded by set_dma_device_addr() */
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
  50
  51#if !defined(CONFIG_M5272)
/*
 * Mode-selector bits used to compose the DMA_MODE_* values below; they map
 * directly onto how set_dma_mode() programs the DCR on non-MCF5272 parts.
 */
#define DMA_MODE_WRITE_BIT  0x01  /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT   0x02  /* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT   0x04  /* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08  /* single-address-mode */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ               0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE              1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD          2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD         3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG          4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG         5
/* I/O to memory, 8 bits, single-address-mode */
#define DMA_MODE_READ_SINGLE        8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE       9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE  10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE 11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE  12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE 13
  81
  82#else /* CONFIG_M5272 is defined */
  83
  84/* Source static-address mode */
  85#define DMA_MODE_SRC_SA_BIT 0x01
  86/* Two bits to select between all four modes */
  87#define DMA_MODE_SSIZE_MASK 0x06
  88/* Offset to shift bits in */
  89#define DMA_MODE_SSIZE_OFF  0x01
  90/* Destination static-address mode */
  91#define DMA_MODE_DES_SA_BIT 0x10
  92/* Two bits to select between all four modes */
  93#define DMA_MODE_DSIZE_MASK 0x60
  94/* Offset to shift bits in */
  95#define DMA_MODE_DSIZE_OFF  0x05
  96/* Size modifiers */
  97#define DMA_MODE_SIZE_LONG  0x00
  98#define DMA_MODE_SIZE_BYTE  0x01
  99#define DMA_MODE_SIZE_WORD  0x02
 100#define DMA_MODE_SIZE_LINE  0x03
 101
 102/*
 103 * Aliases to help speed quick ports; these may be suboptimal, however. They
 104 * do not include the SINGLE mode modifiers since the MCF5272 does not have a
 105 * mode where the device is in control of its addressing.
 106 */
 107
 108/* I/O to memory, 8 bits, mode */
 109#define DMA_MODE_READ                 ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
 110/* memory to I/O, 8 bits, mode */
 111#define DMA_MODE_WRITE              ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
 112/* I/O to memory, 16 bits, mode */
 113#define DMA_MODE_READ_WORD              ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
 114/* memory to I/O, 16 bits, mode */
 115#define DMA_MODE_WRITE_WORD         ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
 116/* I/O to memory, 32 bits, mode */
 117#define DMA_MODE_READ_LONG              ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
 118/* memory to I/O, 32 bits, mode */
 119#define DMA_MODE_WRITE_LONG         ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
 120
 121#endif /* !defined(CONFIG_M5272) */
 122
 123#if !defined(CONFIG_M5272)
 124/* enable/disable a specific DMA channel */
 125static __inline__ void enable_dma(unsigned int dmanr)
 126{
 127  volatile unsigned short *dmawp;
 128
 129#ifdef DMA_DEBUG
 130  printk("enable_dma(dmanr=%d)\n", dmanr);
 131#endif
 132
 133  dmawp = (unsigned short *) dma_base_addr[dmanr];
 134  dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
 135}
 136
 137static __inline__ void disable_dma(unsigned int dmanr)
 138{
 139  volatile unsigned short *dmawp;
 140  volatile unsigned char  *dmapb;
 141
 142#ifdef DMA_DEBUG
 143  printk("disable_dma(dmanr=%d)\n", dmanr);
 144#endif
 145
 146  dmawp = (unsigned short *) dma_base_addr[dmanr];
 147  dmapb = (unsigned char *) dma_base_addr[dmanr];
 148
 149  /* Turn off external requests, and stop any DMA in progress */
 150  dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
 151  dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
 152}
 153
 154/*
 155 * Clear the 'DMA Pointer Flip Flop'.
 156 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 157 * Use this once to initialize the FF to a known state.
 158 * After that, keep track of it. :-)
 159 * --- In order to do that, the DMA routines below should ---
 160 * --- only be used while interrupts are disabled! ---
 161 *
 162 * This is a NOP for ColdFire. Provide a stub for compatibility.
 163 */
 164static __inline__ void clear_dma_ff(unsigned int dmanr)
 165{
 166}
 167
/* set mode (above) for a specific DMA channel */
/*
 * Program the channel's DCR from a DMA_MODE_* value (see defines above):
 * direction, 8/16/32-bit size (applied to both source and destination),
 * and optional single-address mode.  Completion interrupts are always
 * enabled.  Call this before set_dma_addr().
 */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{

  volatile unsigned char  *dmabp;
  volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

  /* Byte view for DSR, 16-bit view for DCR, over the same register block */
  dmabp = (unsigned char *) dma_base_addr[dmanr];
  dmawp = (unsigned short *) dma_base_addr[dmanr];

  /* Clear config errors */
  dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;

  /* Set command register */
  dmawp[MCFDMA_DCR] =
    MCFDMA_DCR_INT |         /* Enable completion irq */
    MCFDMA_DCR_CS |          /* Force one xfer per request */
    MCFDMA_DCR_AA |          /* Enable auto alignment */
    /* single-address-mode */
    ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
    /* sets s_rw (-> r/w) high if Memory to I/0 */
    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
    /* Memory to I/O or I/O to Memory: increment the memory-side pointer */
    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
    /* 32 bit, 16 bit or 8 bit transfers */
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_SSIZE_WORD :
     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
                                   MCFDMA_DCR_SSIZE_BYTE)) |
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_DSIZE_WORD :
     ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
                                   MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
         dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
         (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}
 210
/* Set transfer address for specific DMA channel */
/*
 * 'a' is the memory-side address; the device side gets the address recorded
 * by set_dma_device_addr().  Which register is the memory side is deduced
 * from the DCR direction bits programmed by set_dma_mode(), so call that
 * (and set_dma_device_addr()) first.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
  volatile unsigned short *dmawp;
  volatile unsigned int   *dmalp;

#ifdef DMA_DEBUG
  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  dmawp = (unsigned short *) dma_base_addr[dmanr];
  dmalp = (unsigned int *) dma_base_addr[dmanr];

  /* Determine which address registers are used for memory/device accesses */
  if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
    /* Source incrementing, must be memory */
    dmalp[MCFDMA_SAR] = a;
    /* Set dest address, must be device */
    dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
  } else {
    /* Destination incrementing, must be memory */
    dmalp[MCFDMA_DAR] = a;
    /* Set source address, must be device */
    dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
  }

#ifdef DEBUG_DMA
  printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
        __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
        (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
        (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}
 244
/*
 * Specific for Coldfire - sets device address.
 * Should be called after the mode set call, and before set DMA address.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  /* Only recorded here; set_dma_addr() later writes it to the hardware. */
  dma_device_address[dmanr] = a;
}
 257
 258/*
 259 * NOTE 2: "count" represents _bytes_.
 260 */
 261static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
 262{
 263  volatile unsigned short *dmawp;
 264
 265#ifdef DMA_DEBUG
 266  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
 267#endif
 268
 269  dmawp = (unsigned short *) dma_base_addr[dmanr];
 270  dmawp[MCFDMA_BCR] = (unsigned short)count;
 271}
 272
 273/*
 274 * Get DMA residue count. After a DMA transfer, this
 275 * should return zero. Reading this while a DMA transfer is
 276 * still in progress will return unpredictable results.
 277 * Otherwise, it returns the number of _bytes_ left to transfer.
 278 */
 279static __inline__ int get_dma_residue(unsigned int dmanr)
 280{
 281  volatile unsigned short *dmawp;
 282  unsigned short count;
 283
 284#ifdef DMA_DEBUG
 285  printk("get_dma_residue(dmanr=%d)\n", dmanr);
 286#endif
 287
 288  dmawp = (unsigned short *) dma_base_addr[dmanr];
 289  count = dmawp[MCFDMA_BCR];
 290  return((int) count);
 291}
 292#else /* CONFIG_M5272 is defined */
 293
 294/*
 295 * The MCF5272 DMA controller is very different than the controller defined above
 296 * in terms of register mapping.  For instance, with the exception of the 16-bit
 297 * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
 298 *
 299 * The big difference, however, is the lack of device-requested DMA.  All modes
 300 * are dual address transfer, and there is no 'device' setup or direction bit.
 301 * You can DMA between a device and memory, between memory and memory, or even between
 302 * two devices directly, with any combination of incrementing and non-incrementing
 303 * addresses you choose.  This puts a crimp in distinguishing between the 'device
 304 * address' set up by set_dma_device_addr.
 305 *
 306 * Therefore, there are two options.  One is to use set_dma_addr and set_dma_device_addr,
 307 * which will act exactly as above in -- it will look to see if the source is set to
 308 * autoincrement, and if so it will make the source use the set_dma_addr value and the
 309 * destination the set_dma_device_addr value.  Otherwise the source will be set to the
 310 * set_dma_device_addr value and the destination will get the set_dma_addr value.
 311 *
 312 * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
 313 * and make it explicit.  Depending on what you're doing, one of these two should work
 314 * for you, but don't mix them in the same transfer setup.
 315 */
 316
 317/* enable/disable a specific DMA channel */
 318static __inline__ void enable_dma(unsigned int dmanr)
 319{
 320  volatile unsigned int  *dmalp;
 321
 322#ifdef DMA_DEBUG
 323  printk("enable_dma(dmanr=%d)\n", dmanr);
 324#endif
 325
 326  dmalp = (unsigned int *) dma_base_addr[dmanr];
 327  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
 328}
 329
 330static __inline__ void disable_dma(unsigned int dmanr)
 331{
 332  volatile unsigned int   *dmalp;
 333
 334#ifdef DMA_DEBUG
 335  printk("disable_dma(dmanr=%d)\n", dmanr);
 336#endif
 337
 338  dmalp = (unsigned int *) dma_base_addr[dmanr];
 339
 340  /* Turn off external requests, and stop any DMA in progress */
 341  dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
 342  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
 343}
 344
 345/*
 346 * Clear the 'DMA Pointer Flip Flop'.
 347 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 348 * Use this once to initialize the FF to a known state.
 349 * After that, keep track of it. :-)
 350 * --- In order to do that, the DMA routines below should ---
 351 * --- only be used while interrupts are disabled! ---
 352 *
 353 * This is a NOP for ColdFire. Provide a stub for compatibility.
 354 */
 355static __inline__ void clear_dma_ff(unsigned int dmanr)
 356{
 357}
 358
 359/* set mode (above) for a specific DMA channel */
 360static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
 361{
 362
 363  volatile unsigned int   *dmalp;
 364  volatile unsigned short *dmawp;
 365
 366#ifdef DMA_DEBUG
 367  printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
 368#endif
 369  dmalp = (unsigned int *) dma_base_addr[dmanr];
 370  dmawp = (unsigned short *) dma_base_addr[dmanr];
 371
 372  /* Clear config errors */
 373  dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
 374
 375  /* Set command register */
 376  dmalp[MCFDMA_DMR] =
 377    MCFDMA_DMR_RQM_DUAL |         /* Mandatory Request Mode setting */
 378    MCFDMA_DMR_DSTT_SD  |         /* Set up addressing types; set to supervisor-data. */
 379    MCFDMA_DMR_SRCT_SD  |         /* Set up addressing types; set to supervisor-data. */
 380    /* source static-address-mode */
 381    ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
 382    /* dest static-address-mode */
 383    ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
 384    /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */
 385    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
 386    (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
 387
 388  dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN;   /* Enable completion interrupts */
 389
 390#ifdef DEBUG_DMA
 391  printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
 392         dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
 393         (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
 394#endif
 395}
 396
 397/* Set transfer address for specific DMA channel */
 398static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
 399{
 400  volatile unsigned int   *dmalp;
 401
 402#ifdef DMA_DEBUG
 403  printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
 404#endif
 405
 406  dmalp = (unsigned int *) dma_base_addr[dmanr];
 407
 408  /* Determine which address registers are used for memory/device accesses */
 409  if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
 410    /* Source incrementing, must be memory */
 411    dmalp[MCFDMA_DSAR] = a;
 412    /* Set dest address, must be device */
 413    dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
 414  } else {
 415    /* Destination incrementing, must be memory */
 416    dmalp[MCFDMA_DDAR] = a;
 417    /* Set source address, must be device */
 418    dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
 419  }
 420
 421#ifdef DEBUG_DMA
 422  printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
 423        __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
 424        (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
 425        (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
 426#endif
 427}
 428
/*
 * Specific for Coldfire - sets device address.
 * Should be called after the mode set call, and before set DMA address.
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
  printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

  /* Only recorded here; set_dma_addr() later writes it to the hardware. */
  dma_device_address[dmanr] = a;
}
 441
 442/*
 443 * NOTE 2: "count" represents _bytes_.
 444 *
 445 * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
 446 */
 447static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
 448{
 449  volatile unsigned int *dmalp;
 450
 451#ifdef DMA_DEBUG
 452  printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
 453#endif
 454
 455  dmalp = (unsigned int *) dma_base_addr[dmanr];
 456  dmalp[MCFDMA_DBCR] = count;
 457}
 458
 459/*
 460 * Get DMA residue count. After a DMA transfer, this
 461 * should return zero. Reading this while a DMA transfer is
 462 * still in progress will return unpredictable results.
 463 * Otherwise, it returns the number of _bytes_ left to transfer.
 464 */
 465static __inline__ int get_dma_residue(unsigned int dmanr)
 466{
 467  volatile unsigned int *dmalp;
 468  unsigned int count;
 469
 470#ifdef DMA_DEBUG
 471  printk("get_dma_residue(dmanr=%d)\n", dmanr);
 472#endif
 473
 474  dmalp = (unsigned int *) dma_base_addr[dmanr];
 475  count = dmalp[MCFDMA_DBCR];
 476  return(count);
 477}
 478
 479#endif /* !defined(CONFIG_M5272) */
 480#endif /* CONFIG_COLDFIRE */
 481
/* it's useless on the m68k, but unfortunately needed by the new
   bootmem allocator (but this should do it for this) */
#define MAX_DMA_ADDRESS PAGE_OFFSET

/* Upper bound on channels for the generic DMA channel bookkeeping below */
#define MAX_DMA_CHANNELS 8

extern int request_dma(unsigned int dmanr, const char * device_id);     /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);       /* release it again */

/* Flag for broken PCI/ISA bridges; constant 0 when PCI is not configured */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy    (0)
#endif
 496
 497#endif /* _M68K_DMA_H */
 498