/*
 * linux/arch/unicore32/include/asm/dma-mapping.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>

extern struct dma_map_ops swiotlb_dma_map_ops;

26static inline struct dma_map_ops *get_dma_ops(struct device *dev)
27{
28 return &swiotlb_dma_map_ops;
29}
30
31static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
32{
33 if (dev && dev->dma_mask)
34 return addr + size - 1 <= *dev->dma_mask;
35
36 return 1;
37}
38
39static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
40{
41 return paddr;
42}
43
44static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
45{
46 return daddr;
47}
48
49static inline void dma_mark_clean(void *addr, size_t size) {}
50
51static inline void dma_cache_sync(struct device *dev, void *vaddr,
52 size_t size, enum dma_data_direction direction)
53{
54 unsigned long start = (unsigned long)vaddr;
55 unsigned long end = start + size;
56
57 switch (direction) {
58 case DMA_NONE:
59 BUG();
60 case DMA_FROM_DEVICE:
61 case DMA_BIDIRECTIONAL:
62 __cpuc_dma_flush_range(start, end);
63 break;
64 case DMA_TO_DEVICE:
65 __cpuc_dma_clean_range(start, end);
66 break;
67 }
68}
69
#endif /* __KERNEL__ */
#endif /* __UNICORE_DMA_MAPPING_H__ */