1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#ifndef _LINUX_IO_MAPPING_H
19#define _LINUX_IO_MAPPING_H
20
21#include <linux/types.h>
22#include <linux/slab.h>
23#include <linux/bug.h>
24#include <linux/io.h>
25#include <asm/page.h>
26
27
28
29
30
31
32
33
/*
 * Describes a write-combining mapping of a bus-address I/O region.
 * Which fields are live depends on CONFIG_HAVE_ATOMIC_IOMAP (see below).
 */
struct io_mapping {
	resource_size_t base;	/* bus/physical start address of the region */
	unsigned long size;	/* length of the region in bytes */
	pgprot_t prot;		/* page protection used when mapping pages */
	void __iomem *iomem;	/* whole-region linear mapping (non-iomap case) */
};
40
41#ifdef CONFIG_HAVE_ATOMIC_IOMAP
42
43#include <asm/iomap.h>
44
45
46
47
48
49
50
51static inline struct io_mapping *
52io_mapping_init_wc(struct io_mapping *iomap,
53 resource_size_t base,
54 unsigned long size)
55{
56 pgprot_t prot;
57
58 if (iomap_create_wc(base, size, &prot))
59 return NULL;
60
61 iomap->base = base;
62 iomap->size = size;
63 iomap->prot = prot;
64 return iomap;
65}
66
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc()
 * @mapping: the mapping to release
 *
 * Releases the arch iomap WC reservation; does not free @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
72
73
74static inline void __iomem *
75io_mapping_map_atomic_wc(struct io_mapping *mapping,
76 unsigned long offset)
77{
78 resource_size_t phys_addr;
79 unsigned long pfn;
80
81 BUG_ON(offset >= mapping->size);
82 phys_addr = mapping->base + offset;
83 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
84 return iomap_atomic_prot_pfn(pfn, mapping->prot);
85}
86
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc()
 *
 * NOTE(review): presumably iounmap_atomic() also restores pagefault and
 * preemption state, mirroring the explicit enables in the non-iomap
 * variant below — confirm against the arch implementation.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}
92
93static inline void __iomem *
94io_mapping_map_wc(struct io_mapping *mapping,
95 unsigned long offset,
96 unsigned long size)
97{
98 resource_size_t phys_addr;
99
100 BUG_ON(offset >= mapping->size);
101 phys_addr = mapping->base + offset;
102
103 return ioremap_wc(phys_addr, size);
104}
105
/*
 * io_mapping_unmap - release a mapping obtained from io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
111
112#else
113
114#include <linux/uaccess.h>
115#include <asm/pgtable.h>
116
117
118static inline struct io_mapping *
119io_mapping_init_wc(struct io_mapping *iomap,
120 resource_size_t base,
121 unsigned long size)
122{
123 iomap->base = base;
124 iomap->size = size;
125 iomap->iomem = ioremap_wc(base, size);
126#if defined(pgprot_noncached_wc)
127 iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
128#elif defined(pgprot_writecombine)
129 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
130#else
131 iomap->prot = pgprot_noncached(PAGE_KERNEL);
132#endif
133
134 return iomap;
135}
136
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc()
 * @mapping: the mapping to release
 *
 * Unmaps the up-front ioremap of the whole region; does not free
 * @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
142
143
/*
 * io_mapping_map_wc - return a pointer into the pre-mapped region
 * @mapping: mapping created by io_mapping_init_wc()
 * @offset:  byte offset into the region
 * @size:    unused here; the whole region was mapped at init time
 *
 * NOTE(review): unlike the iomap variant, there is no BUG_ON bounds
 * check; callers must keep offset (+ size) within mapping->size.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
151
/*
 * io_mapping_unmap - release a mapping obtained from io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 *
 * Intentionally empty: the pointer aims into the persistent ioremap made
 * at init time, so there is nothing to undo until io_mapping_fini().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
156
157
/*
 * io_mapping_map_atomic_wc - map one page with atomic-context semantics
 * @mapping: mapping created by io_mapping_init_wc()
 * @offset:  byte offset into the region
 *
 * Disables preemption and pagefaults before returning the pointer so the
 * caller sees the same "atomic section" contract as the real atomic-iomap
 * implementation. Must be paired with io_mapping_unmap_atomic(), which
 * re-enables both.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
166
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc()
 *
 * Re-enables pagefaults and preemption in the reverse order of the
 * disables in io_mapping_map_atomic_wc().
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
174
175#endif
176
177static inline struct io_mapping *
178io_mapping_create_wc(resource_size_t base,
179 unsigned long size)
180{
181 struct io_mapping *iomap;
182
183 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
184 if (!iomap)
185 return NULL;
186
187 if (!io_mapping_init_wc(iomap, base, size)) {
188 kfree(iomap);
189 return NULL;
190 }
191
192 return iomap;
193}
194
/*
 * io_mapping_free - tear down and free a mapping from io_mapping_create_wc()
 * @iomap: the mapping to destroy; must not be NULL
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
201
202#endif
203