1
2
3
4
5
6#ifndef _LINUX_IO_MAPPING_H
7#define _LINUX_IO_MAPPING_H
8
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/bug.h>
12#include <linux/io.h>
13#include <linux/pgtable.h>
14#include <asm/page.h>
15
16
17
18
19
20
21
22
/*
 * struct io_mapping - describes a (preferably write-combining) mapping of
 * a physical I/O region.
 * @base:  physical/bus base address of the region
 * @size:  length of the region in bytes
 * @prot:  page protection used for mappings (WC when the platform offers it)
 * @iomem: kernel virtual address of the whole region; only used by the
 *         generic (!CONFIG_HAVE_ATOMIC_IOMAP) implementation below, where
 *         the entire region is ioremapped once at init time
 */
struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};
29
30#ifdef CONFIG_HAVE_ATOMIC_IOMAP
31
32#include <linux/pfn.h>
33#include <asm/iomap.h>
34
35
36
37
38
39
40
41static inline struct io_mapping *
42io_mapping_init_wc(struct io_mapping *iomap,
43 resource_size_t base,
44 unsigned long size)
45{
46 pgprot_t prot;
47
48 if (iomap_create_wc(base, size, &prot))
49 return NULL;
50
51 iomap->base = base;
52 iomap->size = size;
53 iomap->prot = prot;
54 return iomap;
55}
56
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc(),
 * releasing whatever the arch allocated in iomap_create_wc().
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
62
63
64static inline void __iomem *
65io_mapping_map_atomic_wc(struct io_mapping *mapping,
66 unsigned long offset)
67{
68 resource_size_t phys_addr;
69
70 BUG_ON(offset >= mapping->size);
71 phys_addr = mapping->base + offset;
72 return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
73}
74
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc(); @vaddr is the
 * pointer that call returned.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}
80
81static inline void __iomem *
82io_mapping_map_wc(struct io_mapping *mapping,
83 unsigned long offset,
84 unsigned long size)
85{
86 resource_size_t phys_addr;
87
88 BUG_ON(offset >= mapping->size);
89 phys_addr = mapping->base + offset;
90
91 return ioremap_wc(phys_addr, size);
92}
93
/*
 * io_mapping_unmap - undo io_mapping_map_wc(); @vaddr is the pointer that
 * call returned.
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
99
100#else
101
102#include <linux/uaccess.h>
103
104
105static inline struct io_mapping *
106io_mapping_init_wc(struct io_mapping *iomap,
107 resource_size_t base,
108 unsigned long size)
109{
110 iomap->iomem = ioremap_wc(base, size);
111 if (!iomap->iomem)
112 return NULL;
113
114 iomap->base = base;
115 iomap->size = size;
116#if defined(pgprot_noncached_wc)
117 iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
118#elif defined(pgprot_writecombine)
119 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
120#else
121 iomap->prot = pgprot_noncached(PAGE_KERNEL);
122#endif
123
124 return iomap;
125}
126
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc(),
 * unmapping the single region-wide ioremap.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
132
133
134static inline void __iomem *
135io_mapping_map_wc(struct io_mapping *mapping,
136 unsigned long offset,
137 unsigned long size)
138{
139 return mapping->iomem + offset;
140}
141
/*
 * io_mapping_unmap - counterpart of io_mapping_map_wc().  Deliberately a
 * no-op here: the underlying region-wide ioremap stays alive until
 * io_mapping_fini(), so there is nothing to release per map call.
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
146
147
/*
 * io_mapping_map_atomic_wc - map one page of the region at @offset while
 * entering an atomic section.  Disables preemption, then pagefaults, to
 * mirror the guarantees of the CONFIG_HAVE_ATOMIC_IOMAP version; the
 * returned pointer is just an offset into the permanent mapping.  Must be
 * paired with io_mapping_unmap_atomic(), which re-enables in reverse
 * order.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
156
/*
 * io_mapping_unmap_atomic - leave the atomic section opened by
 * io_mapping_map_atomic_wc().  Unwinds in strict reverse order:
 * (no-op) unmap, then pagefault_enable(), then preempt_enable().
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
164
165#endif
166
167static inline struct io_mapping *
168io_mapping_create_wc(resource_size_t base,
169 unsigned long size)
170{
171 struct io_mapping *iomap;
172
173 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
174 if (!iomap)
175 return NULL;
176
177 if (!io_mapping_init_wc(iomap, base, size)) {
178 kfree(iomap);
179 return NULL;
180 }
181
182 return iomap;
183}
184
/*
 * io_mapping_free - release a mapping obtained from io_mapping_create_wc():
 * tears down the mapping, then frees the struct itself.
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
191
192#endif
193