#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/dma_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define DMAR_GCMD_REG	0x18	/* Global command register */
#define DMAR_GSTS_REG	0x1c	/* Global status register */
#define DMAR_RTADDR_REG	0x20	/* Root entry table address register */
#define DMAR_CCMD_REG	0x28	/* Context command register */
#define DMAR_FSTS_REG	0x34	/* Fault status register */
#define DMAR_FECTL_REG	0x38	/* Fault event control register */
#define DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44	/* Fault event interrupt upper addr register */
#define DMAR_AFLOG_REG	0x58	/* Advanced fault log register */
#define DMAR_PMEN_REG	0x64	/* Enable protected memory region */
#define DMAR_PLMBASE_REG 0x68	/* Protected low memory base register */
#define DMAR_PLMLIMIT_REG 0x6c	/* Protected low memory limit register */
#define DMAR_PHMBASE_REG 0x70	/* Protected high memory base register */
#define DMAR_PHMLIMIT_REG 0x78	/* Protected high memory limit register */
#define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG	0x98	/* Invalidation complete status register */
#define DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */

#define OFFSET_STRIDE	(9)	/* 9 bits of page-table index per level */
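
/*
 * 64-bit DMAR register accessors: the value is transferred as two 32-bit
 * MMIO accesses, low dword first, then high dword.
 */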
static inline u64 dmar_readq(void __iomem *addr)
{
	u32 lo, hi;
	lo = readl(addr);
	hi = readl(addr + 4);
	return (((u64) hi) << 32) + lo;
}

static inline void dmar_writeq(void __iomem *addr, u64 val)
{
	writel((u32)val, addr);
	writel((u32)(val >> 32), addr + 4);
}

#define DMAR_VER_MAJOR(v)	(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)	((v) & 0x0f)

/*
 * Decoding Capability Register
 */
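/*
 * The cap_*() helpers below decode individual fields of the raw 64-bit
 * value read from DMAR_CAP_REG; a minimal sketch of their use:
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	int nfr = cap_num_fault_regs(cap);	(number of fault recording regs)
 *	int mgaw = cap_mgaw(cap);		(max guest address width, in bits)
 */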
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))

/*
 * Extended Capability Register
 */
#define ecap_niotlb_iunits(e)	((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) \
	(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	((e >> 6) & 0x1)
#define ecap_eim_support(e)	((e >> 4) & 0x1)
#define ecap_ir_support(e)	((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e)	((e >> 20) & 0xf)
#define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping control */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET	60
#define DMA_TLB_GLOBAL_FLUSH	(((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH	(((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH	(((u64)3) << 60)
#define DMA_TLB_IIRG(type)	((type >> 60) & 7)
#define DMA_TLB_IAIG(val)	(((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN	(((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN	(((u64)1) << 48)
#define DMA_TLB_DID(id)		(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT		(((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_TLB_MAX_SIZE	(0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET	61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 3)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM	(((u32)1) << 31)
#define DMA_PMEN_PRS	(((u32)1) << 0)

/* GCMD_REG */
#define DMA_GCMD_TE	(((u32)1) << 31)
#define DMA_GCMD_SRTP	(((u32)1) << 30)
#define DMA_GCMD_SFL	(((u32)1) << 29)
#define DMA_GCMD_EAFL	(((u32)1) << 28)
#define DMA_GCMD_WBF	(((u32)1) << 27)
#define DMA_GCMD_QIE	(((u32)1) << 26)
#define DMA_GCMD_SIRTP	(((u32)1) << 24)
#define DMA_GCMD_IRE	(((u32)1) << 25)
#define DMA_GCMD_CFI	(((u32)1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES	(((u32)1) << 31)
#define DMA_GSTS_RTPS	(((u32)1) << 30)
#define DMA_GSTS_FLS	(((u32)1) << 29)
#define DMA_GSTS_AFLS	(((u32)1) << 28)
#define DMA_GSTS_WBFS	(((u32)1) << 27)
#define DMA_GSTS_QIES	(((u32)1) << 26)
#define DMA_GSTS_IRTPS	(((u32)1) << 24)
#define DMA_GSTS_IRES	(((u32)1) << 25)
#define DMA_GSTS_CFIS	(((u32)1) << 23)

/* CCMD_REG */
#define DMA_CCMD_ICC		(((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL	(((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL	(((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL	(((u64)3) << 61)
#define DMA_CCMD_FM(m)		(((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT	0
#define DMA_CCMD_MASK_1BIT	1
#define DMA_CCMD_MASK_2BIT	2
#define DMA_CCMD_MASK_3BIT	3
#define DMA_CCMD_SID(s)		(((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d)		((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM	(((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PPF	((u32)2)
#define DMA_FSTS_PFO	((u32)1)
#define DMA_FSTS_IQE	(1 << 4)
#define DMA_FSTS_ICE	(1 << 5)
#define DMA_FSTS_ITE	(1 << 6)
#define dma_fsts_fault_record_index(s)	(((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F	(((u32)1) << 31)
#define dma_frcd_type(d)	((d >> 30) & 1)
#define dma_frcd_fault_reason(c)	(c & 0xff)
#define dma_frcd_source_id(c)	(c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d)	(d & (((u64)-1) << PAGE_SHIFT))

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
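
/*
 * Sketch of the typical wait-for-status pattern (assuming a caller that
 * has just set a bit in DMAR_GCMD_REG, e.g. enabling translation):
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */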

#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};
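
/* Queued-invalidation descriptor type codes (bits 3:0 of the low qword) */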
#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am))

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

struct qi_desc {
	u64 low, high;
};
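
/*
 * Software state for the hardware invalidation queue: an array of
 * QI_LENGTH descriptors, a parallel per-descriptor status array, and a
 * free list tracked by head/tail indices and a count, all under q_lock.
 */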
struct q_inval {
	spinlock_t	q_lock;
	struct qi_desc	*desc;
	int		*desc_status;
	int		free_head;
	int		free_tail;
	int		free_cnt;
};

#ifdef CONFIG_INTR_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct ir_table {
	struct irte *base;
};
#endif

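/*
 * Invalidation callbacks, so the driver can plug in either register-based
 * invalidation or queued invalidation depending on hardware support.
 */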
struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

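/* Indices into the iommu_state[] save area used across suspend/resume */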
enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

struct intel_iommu {
	void __iomem	*reg;	/* Pointer to hardware regs, virtual addr */
	u64		cap;
	u64		ecap;
	u32		gcmd;	/* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	spinlock_t	register_lock;	/* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw;	/* agaw of this iommu */
	int		msagaw;	/* max sagaw of this iommu */
	unsigned int	irq;
	unsigned char	name[13];	/* Device Name */

#ifdef CONFIG_DMAR
	unsigned long	*domain_ids;	/* bitmap of domains */
	struct dmar_domain **domains;	/* ptr to domains */
	spinlock_t	lock;	/* protect context, domain ids */
	struct root_entry *root_entry;	/* virtual address */

	struct iommu_flush flush;
#endif
	struct q_inval	*qi;	/* Queued invalidation info */
	u32 *iommu_state;	/* Store iommu states between suspend and resume */

#ifdef CONFIG_INTR_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
#endif
};

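/*
 * Flush CPU cache lines covering IOMMU-visible structures when the
 * hardware does not report coherent page-walk accesses (ECAP "C" bit
 * clear).
 */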
static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

extern struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			       u64 addr, unsigned mask);

extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

#endif