1
2
3
4
5
6#ifndef _LINUX_IO_MAPPING_H
7#define _LINUX_IO_MAPPING_H
8
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/bug.h>
12#include <linux/io.h>
13#include <asm/page.h>
14
15
16
17
18
19
20
21
/*
 * Describes one write-combining mapping of a physical resource (typically
 * a PCI BAR).  Field order is part of the in-kernel layout; do not reorder.
 */
struct io_mapping {
	resource_size_t base;	/* physical base address of the resource */
	unsigned long size;	/* length of the resource, in bytes */
	pgprot_t prot;		/* page protection used for WC mappings */
	void __iomem *iomem;	/* kernel virtual mapping (non-atomic path only) */
};
28
29#ifdef CONFIG_HAVE_ATOMIC_IOMAP
30
31#include <asm/iomap.h>
32
33
34
35
36
37
38
39static inline struct io_mapping *
40io_mapping_init_wc(struct io_mapping *iomap,
41 resource_size_t base,
42 unsigned long size)
43{
44 pgprot_t prot;
45
46 if (iomap_create_wc(base, size, &prot))
47 return NULL;
48
49 iomap->base = base;
50 iomap->size = size;
51 iomap->prot = prot;
52 return iomap;
53}
54
/*
 * Tear down a mapping set up by io_mapping_init_wc(): release the arch
 * WC reservation for the range.  Does not free @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
60
61
62static inline void __iomem *
63io_mapping_map_atomic_wc(struct io_mapping *mapping,
64 unsigned long offset)
65{
66 resource_size_t phys_addr;
67 unsigned long pfn;
68
69 BUG_ON(offset >= mapping->size);
70 phys_addr = mapping->base + offset;
71 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
72 return iomap_atomic_prot_pfn(pfn, mapping->prot);
73}
74
/*
 * Undo io_mapping_map_atomic_wc(): release the atomic kmap slot holding
 * @vaddr.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}
80
81static inline void __iomem *
82io_mapping_map_wc(struct io_mapping *mapping,
83 unsigned long offset,
84 unsigned long size)
85{
86 resource_size_t phys_addr;
87
88 BUG_ON(offset >= mapping->size);
89 phys_addr = mapping->base + offset;
90
91 return ioremap_wc(phys_addr, size);
92}
93
/*
 * Release a mapping obtained from io_mapping_map_wc().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
99
100#else
101
102#include <linux/uaccess.h>
103#include <asm/pgtable.h>
104
105
106static inline struct io_mapping *
107io_mapping_init_wc(struct io_mapping *iomap,
108 resource_size_t base,
109 unsigned long size)
110{
111 iomap->base = base;
112 iomap->size = size;
113 iomap->iomem = ioremap_wc(base, size);
114#if defined(pgprot_noncached_wc)
115 iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
116#elif defined(pgprot_writecombine)
117 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
118#else
119 iomap->prot = pgprot_noncached(PAGE_KERNEL);
120#endif
121
122 return iomap;
123}
124
/*
 * Tear down a mapping set up by io_mapping_init_wc(): drop the single
 * persistent WC mapping.  Does not free @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
130
131
/* Non-atomic map/unmap */
/*
 * Return a pointer @offset bytes into the persistent mapping.  @size is
 * unused here (the whole range is already mapped) but kept so the
 * signature matches the CONFIG_HAVE_ATOMIC_IOMAP variant.  Note there is
 * no bounds check on @offset in this variant.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
139
/*
 * No-op: the persistent mapping stays alive until io_mapping_fini(),
 * so there is nothing to release per map call.
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
144
145
146static inline void __iomem *
147io_mapping_map_atomic_wc(struct io_mapping *mapping,
148 unsigned long offset)
149{
150 preempt_disable();
151 pagefault_disable();
152 return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
153}
154
/*
 * Leave the atomic-like section entered by io_mapping_map_atomic_wc().
 * Unwind order mirrors the map path: unmap (a no-op here), then
 * re-enable pagefaults, then preemption.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
162
163#endif
164
165static inline struct io_mapping *
166io_mapping_create_wc(resource_size_t base,
167 unsigned long size)
168{
169 struct io_mapping *iomap;
170
171 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
172 if (!iomap)
173 return NULL;
174
175 if (!io_mapping_init_wc(iomap, base, size)) {
176 kfree(iomap);
177 return NULL;
178 }
179
180 return iomap;
181}
182
/*
 * Counterpart to io_mapping_create_wc(): tear down the mapping, then
 * free the structure.  Order matters — fini reads fields of @iomap.
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
189
190#endif
191