1#ifndef _ASM_X86_IO_H
2#define _ASM_X86_IO_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#define ARCH_HAS_IOREMAP_WC
38#define ARCH_HAS_IOREMAP_WT
39
40#include <linux/string.h>
41#include <linux/compiler.h>
42#include <asm/page.h>
43#include <asm/early_ioremap.h>
44#include <asm/pgtable_types.h>
45
/*
 * build_mmio_read() - generate an MMIO read accessor.
 * @name:    name of the generated function
 * @size:    instruction suffix ("b"/"w"/"l"/"q") selecting access width
 * @type:    C type of the value read
 * @reg:     output register constraint for the result (e.g. "=q", "=r")
 * @barrier: optional clobber clause; pass :"memory" to forbid the
 *           compiler from caching/reordering memory accesses across
 *           the read, or leave empty for the relaxed variants
 *
 * The __force cast strips the __iomem address-space annotation so the
 * target can be used as a plain "m" memory operand in the asm.
 */
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
50
/*
 * build_mmio_write() - generate an MMIO write accessor.
 * @name:    name of the generated function
 * @size:    instruction suffix ("b"/"w"/"l"/"q") selecting access width
 * @type:    C type of the value written
 * @reg:     input register constraint for the value (e.g. "q", "r")
 * @barrier: optional clobber clause; :"memory" for ordered variants,
 *           empty for relaxed ones
 *
 * Counterpart of build_mmio_read(); the store itself is a single mov
 * to the (__force-cast) memory operand.
 */
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
55
/*
 * Ordered MMIO accessors: the :"memory" clobber keeps the compiler from
 * reordering or caching other memory accesses across the MMIO operation.
 */
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

/* Relaxed variants: no memory clobber, so the compiler may reorder. */
build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

/* The *_relaxed() and __raw_*() APIs map to the clobber-free variants. */
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
85
/* On x86 a compiler barrier is sufficient to order MMIO writes. */
#define mmiowb() barrier()

#ifdef CONFIG_X86_64

/* 64-bit kernels get native 8-byte MMIO accessors. */
build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")

/*
 * Note: unlike the 8/16/32-bit relaxed accessors above, the _relaxed
 * and __raw 64-bit forms alias the ordered readq/writeq (which carry
 * the :"memory" clobber).
 */
#define readq_relaxed(a) readq(a)
#define writeq_relaxed(v, a) writeq(v, a)

#define __raw_readq(a) readq(a)
#define __raw_writeq(val, addr) writeq(val, addr)

/* Let generic code know 64-bit single-copy MMIO ops are available. */
#define readq readq
#define writeq writeq

#endif
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	Returns the physical address corresponding to a kernel
 *	direct-mapped virtual address (via __pa()). Not valid for
 *	vmalloc/ioremap addresses; this gives no bus mappings either.
 */
static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}
122
123
124
125
126
127
128
129
130
131
132
133
134
135
/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	Inverse of virt_to_phys(): returns the direct-mapped kernel
 *	virtual address for @address (via __va()). Only meaningful for
 *	physical addresses covered by the kernel direct mapping.
 */
static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
140
141
142
143
/* Physical address of a page: pfn shifted into a dma_addr_t. */
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with physical addresses on x86,
 * so the isa_* conversions are thin wrappers around virt/phys ones,
 * truncated to the 32-bit range ISA devices can address.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
#define isa_bus_to_virt phys_to_virt
157
158
159
160
161
162
163
/*
 * Deprecated bus-address helpers: on x86 bus addresses equal physical
 * addresses, so these simply alias the phys conversions.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * ioremap variants, distinguished by resulting cache attribute:
 * _nocache = uncached (UC-), _uc = strongly uncached (UC),
 * _cache = cacheable, _prot = caller-supplied protection bits.
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc

extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
/**
 *	ioremap	-	map bus memory into CPU space
 *	@offset: bus address of the memory
 *	@size:   size of the resource to map
 *
 *	Default ioremap() is uncached on x86: it forwards to
 *	ioremap_nocache(), which is the safe attribute for device MMIO.
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

/* Force the next iounmap() to tear down the mapping synchronously. */
extern void set_iounmap_nonlazy(void);
200
201#ifdef __KERNEL__
202
203#include <asm-generic/iomap.h>
204
205
206
207
/* Kernel virtual addresses need no translation for /dev/kmem access. */
#define xlate_dev_kmem_ptr(p) p
209
210
211
212
213
214
215
216
217
/**
 *	memset_io	-	set a range of I/O memory to a constant value
 *	@addr:  I/O memory start
 *	@val:   byte to fill with
 *	@count: number of bytes
 *
 *	Plain memset() works on x86 MMIO; the __force cast only strips
 *	the __iomem annotation for sparse.
 */
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
{
	memset((void __force *)addr, val, count);
}
223
224
225
226
227
228
229
230
231
/**
 *	memcpy_fromio	-	copy from I/O memory to RAM
 *	@dst:   destination in normal memory
 *	@src:   source in I/O memory
 *	@count: number of bytes
 *
 *	Plain memcpy() suffices on x86; __force drops __iomem for sparse.
 */
static inline void
memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
{
	memcpy(dst, (const void __force *)src, count);
}
237
238
239
240
241
242
243
244
245
/**
 *	memcpy_toio	-	copy from RAM to I/O memory
 *	@dst:   destination in I/O memory
 *	@src:   source in normal memory
 *	@count: number of bytes
 *
 *	Plain memcpy() suffices on x86; __force drops __iomem for sparse.
 */
static inline void
memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
{
	memcpy((void __force *)dst, src, count);
}
251
252
253
254
255
256
257
258
259
/* Kernel virtual address where the ISA range (phys 0) is direct-mapped. */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
261
262
263
264
265
266
267
268
269
/*
 * flush_write_buffers() - drain CPU store buffers before DMA.
 * A no-op on sane CPUs; with CONFIG_X86_PPRO_FENCE (Pentium Pro
 * store-ordering errata workaround) a locked add to the stack forces
 * the write buffers to drain.
 */
static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
276
277#endif
278
/* Small post-I/O delay for slow legacy devices; mechanism selected at
 * boot via io_delay_type (see io_delay_init()). */
extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
/* Paravirt kernels supply their own slow_down_io(). */
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	/* Some ancient hardware wants an extra-long (4x) delay. */
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#endif
299
/*
 * BUILDIO() - generate the port I/O accessor family for one width:
 *   out##bwl/in##bwl      - single out/in instruction
 *   out##bwl_p/in##bwl_p  - same, followed by slow_down_io()
 *   outs##bwl/ins##bwl    - string forms ("rep outs"/"rep ins") moving
 *                           @count items between @addr and @port
 * @bwl:  instruction suffix (b/w/l)
 * @bw:   operand-size modifier for %0 ("b"/"w"; empty for 32-bit)
 * @type: value type ("unsigned type" is the accessor's value)
 *
 * "a" pins the value to AL/AX/EAX and "Nd" allows an immediate port
 * number 0-255 or the DX register, as the out/in encodings require.
 */
#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
	asm volatile("out" #bwl " %" #bw "0, %w1" \
		     : : "a"(value), "Nd"(port)); \
} \
\
static inline unsigned type in##bwl(int port) \
{ \
	unsigned type value; \
	asm volatile("in" #bwl " %w1, %" #bw "0" \
		     : "=a"(value) : "Nd"(port)); \
	return value; \
} \
\
static inline void out##bwl##_p(unsigned type value, int port) \
{ \
	out##bwl(value, port); \
	slow_down_io(); \
} \
\
static inline unsigned type in##bwl##_p(int port) \
{ \
	unsigned type value = in##bwl(port); \
	slow_down_io(); \
	return value; \
} \
\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
	asm volatile("rep; outs" #bwl \
		     : "+S"(addr), "+c"(count) : "d"(port)); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
	asm volatile("rep; ins" #bwl \
		     : "+D"(addr), "+c"(count) : "d"(port)); \
}

BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)
343
/* Map/unmap a physical page for /dev/mem style access. */
extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

/* Change the cache attribute of an existing ioremap'ed range. */
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				enum page_cache_mode pcm);
/* Write-combining / write-through ioremap variants. */
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);

extern bool is_early_ioremap_ptep(pte_t *ptep);
353
#ifdef CONFIG_XEN
#include <xen/xen.h>
struct bio_vec;

extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
				      const struct bio_vec *vec2);

/*
 * Under Xen, physically-contiguous guest pages need not be contiguous
 * in machine memory, so biovec merging additionally has to pass the
 * Xen-specific check when running in a Xen domain.
 */
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
#endif
365
/* x86 port I/O space is 16 bits wide. */
#define IO_SPACE_LIMIT 0xffff

#ifdef CONFIG_MTRR
/*
 * MTRR-backed write-combining helpers; the self-referencing #defines
 * tell generic code the arch provides real implementations.
 */
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index

extern int __must_check arch_phys_wc_add(unsigned long base,
					 unsigned long size);
extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
/* PAT-based reservation of a range as write-combining. */
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif
383
384#endif
385