#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

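/*
 * Pin every page in [gfn, gfn + (size >> PAGE_SHIFT)): each lookup via
 * gfn_to_pfn_memslot() takes a reference on the page.  Returns the pfn
 * of the first page, or an error pfn if that first lookup fails.
 */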
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn, unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(kvm, slot, gfn++);

	return pfn;
}

int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

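	/* Check that an IOMMU domain exists and is in use. */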
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

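		/* Skip gfns that are already mapped in the IOMMU. */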
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

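		/* Use the host page size backing this gfn as the mapping size. */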
		page_size = kvm_host_page_size(kvm, gfn);

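		/* Make sure the mapping does not run past the end of the memslot. */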
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

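		/* Shrink page_size until gfn is aligned to it. */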
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

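		/*
		 * Pin all pages we are about to map, so the host cannot
		 * free or migrate them while the device may DMA into them.
		 */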
		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
		if (is_error_pfn(pfn)) {
			gfn += 1;
			continue;
		}

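		/* Map into the IO address space. */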
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      get_order(page_size), flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%llx\n", pfn);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	/* Third argument is a page count, not a gfn. */
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}

static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int i, idx, r = 0;
	struct kvm_memslots *slots;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

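/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If attaching
 * changes the domain's cache-coherency capability, remap all guest
 * memory so the mappings pick up the new flags.
 */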
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

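	/* Check that an IOMMU domain exists and is in use. */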
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
		       pci_domain_nr(pdev->bus),
		       pdev->bus->number,
		       PCI_SLOT(pdev->devfn),
		       PCI_FUNC(pdev->devfn));
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

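	/* Remap guest memory if the cache-coherency flag just changed. */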
	if ((last_flags ^ kvm->arch.iommu_flags) ==
	    KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
	       assigned_dev->host_segnr,
	       assigned_dev->host_busnr,
	       PCI_SLOT(assigned_dev->host_devfn),
	       PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

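	/* Check that an IOMMU domain exists and is in use. */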
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
	       assigned_dev->host_segnr,
	       assigned_dev->host_busnr,
	       PCI_SLOT(assigned_dev->host_devfn),
	       PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}

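/*
 * Allocate an IOMMU domain for this VM and map all existing memslots
 * into it, so assigned devices can DMA to guest memory.
 */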
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_found()) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc();
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

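/* Release the page references taken by kvm_pin_pages(). */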
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

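/*
 * Unmap [base_gfn, base_gfn + npages) from the IOMMU domain and unpin
 * the pages backing the mappings.
 */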
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

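	/* Check that an IOMMU domain exists and is in use. */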
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		int order;

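		/* Look up the physical address backing this gfn. */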
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn  = phys >> PAGE_SHIFT;

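		/*
		 * Unmap from the IO address space; iommu_unmap() returns
		 * the order of the region it actually unmapped.
		 */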
		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
		unmap_pages = 1ULL << order;

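		/* Unpin the pages we just unmapped so they are not leaked. */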
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}

static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int i, idx;
	struct kvm_memslots *slots;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
				    slots->memslots[i].npages);
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return 0;
}

int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

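	/* Check that an IOMMU domain exists and is in use. */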
	if (!domain)
		return 0;

	kvm_iommu_unmap_memslots(kvm);
	iommu_domain_free(domain);
	return 0;
}