// SPDX-License-Identifier: GPL-2.0+
/*
 * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2.
 *
 * Copyright (C) 2018 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Register an on-GPU RAM region for cacheable access.
 *
 * Derived from original vfio_pci_igd.c:
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/kvm_ppc.h>
#include "vfio_pci_private.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
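
/*
 * The tracepoints above are declared in the local "trace.h". Purely as an
 * illustration (the real definitions live in that header; the field layout
 * shown here is an assumption), such a declaration looks like:
 *
 *	TRACE_EVENT(vfio_pci_nvgpu_mmap_fault,
 *		TP_PROTO(struct pci_dev *pdev, unsigned long hpa,
 *			 unsigned long ua, vm_fault_t ret),
 *		TP_ARGS(pdev, hpa, ua, ret),
 *		TP_STRUCT__entry(
 *			__string(name, dev_name(&pdev->dev))
 *			__field(unsigned long, hpa)
 *			__field(unsigned long, ua)
 *			__field(int, ret)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, dev_name(&pdev->dev));
 *			__entry->hpa = hpa;
 *			__entry->ua = ua;
 *			__entry->ret = ret;
 *		),
 *		TP_printk("%s: %lx -> %lx ret=%d", __get_str(name),
 *			__entry->hpa, __entry->ua, __entry->ret)
 *	);
 */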

struct vfio_pci_nvgpu_data {
	unsigned long gpu_hpa;	/* GPU RAM host physical address */
	unsigned long gpu_tgt;	/* TGT address of corresponding GPU RAM */
	unsigned long useraddr;	/* GPU RAM userspace address */
	unsigned long size;	/* Size of the GPU RAM window (usually 128GB) */
	struct mm_struct *mm;
	struct mm_iommu_table_group_mem_t *mem;	/* Pre-registered RAM descriptor */
	struct pci_dev *gpdev;
	struct notifier_block group_notifier;
};

static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
	size_t sizealigned;
	void __iomem *ptr;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	/*
	 * Map only the small window needed for this transaction rather than
	 * the whole GPU RAM aperture for the lifetime of the region:
	 *
	 * 1) the actual GPU RAM size is unknown here, only the aperture
	 *    size is, and the aperture is several times bigger;
	 * 2) a long-living cacheable mapping would let the CPU prefetch
	 *    from GPU RAM even after the NVLink bridge has been reset
	 *    (which fences GPU RAM) and trigger hardware management
	 *    interrupts (HMI).
	 *
	 * This is not a fast path anyway.
	 */
	sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
	ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
	if (!ptr)
		return -EFAULT;

	if (iswrite) {
		if (copy_from_user(ptr + posoff, buf, count))
			count = -EFAULT;
		else
			*ppos += count;
	} else {
		if (copy_to_user(buf, ptr + posoff, count))
			count = -EFAULT;
		else
			*ppos += count;
	}

	iounmap(ptr);

	return count;
}
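
/*
 * A hedged illustration, not part of this driver: a VFIO userspace client
 * can access this region with plain pread()/pwrite() on the device fd at
 * the region offset reported by VFIO_DEVICE_GET_REGION_INFO, which ends up
 * in vfio_pci_nvgpu_rw() above. "device_fd" and "info" are assumed:
 *
 *	uint32_t val;
 *
 *	if (pread(device_fd, &val, sizeof(val), info.offset + 0x1000) !=
 *			sizeof(val))
 *		perror("pread");
 */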

static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	long ret;

	/* If there were any mappings at all, undo the GPU RAM pre-registration */
	if (data->mm) {
		ret = mm_iommu_put(data->mm, data->mem);
		WARN_ON(ret);

		mmdrop(data->mm);
	}

	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	pnv_npu2_unmap_lpar_dev(data->gpdev);

	kfree(data);
}

static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_region *region = vma->vm_private_data;
	struct vfio_pci_nvgpu_data *data = region->data;
	unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
	unsigned long vm_pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	unsigned long pfn = nv2pg + vm_pgoff + vmf_off;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
			vmf->address, ret);

	return ret;
}

static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
	.fault = vfio_pci_nvgpu_mmap_fault,
};

static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_nvgpu_data *data = region->data;

	if (data->useraddr)
		return -EPERM;

	if (vma->vm_end - vma->vm_start > data->size)
		return -EINVAL;

	vma->vm_private_data = region;
	vma->vm_flags |= VM_PFNMAP;
	vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;

	/*
	 * Pre-register the GPU RAM window with the mm_iommu framework now,
	 * while the region is being mapped: this lets the POWERNV TCE IOMMU
	 * code translate userspace addresses of this region into GPU RAM
	 * host physical addresses when the guest maps it for DMA.
	 */
	data->useraddr = vma->vm_start;
	data->mm = current->mm;

	mmgrab(data->mm);	/* Dropped by mmdrop() in vfio_pci_nvgpu_release() */
	ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
			(vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
			data->gpu_hpa, &data->mem);

	trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_nvgpu_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt cap = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}

static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
	.rw = vfio_pci_nvgpu_rw,
	.release = vfio_pci_nvgpu_release,
	.mmap = vfio_pci_nvgpu_mmap,
	.add_capability = vfio_pci_nvgpu_add_capability,
};
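
/*
 * A hedged userspace sketch, not part of this driver: how a VFIO client
 * might locate and map the GPU RAM region registered by
 * vfio_pci_nvdia_v100_nvlink2_init() below. "device_fd" and "index" are
 * assumed, and the capability-chain match is abbreviated to a comment:
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = index,		// probe device-specific indexes
 *	};
 *
 *	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
 *		return -1;
 *	// When info.flags has VFIO_REGION_INFO_FLAG_CAPS, re-query with a
 *	// larger argsz and match VFIO_REGION_INFO_CAP_TYPE against
 *	// PCI_VENDOR_ID_NVIDIA / VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM.
 *	void *ram = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, device_fd, info.offset);
 */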

static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
		unsigned long action, void *opaque)
{
	struct kvm *kvm = opaque;
	struct vfio_pci_nvgpu_data *data = container_of(nb,
			struct vfio_pci_nvgpu_data,
			group_notifier);

	if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
			pnv_npu2_map_lpar_dev(data->gpdev,
				kvm->arch.lpid, MSR_DR | MSR_PR))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
{
	int ret;
	u64 reg[2];
	u64 tgt = 0;
	struct device_node *npu_node, *mem_node;
	struct pci_dev *npu_dev;
	struct vfio_pci_nvgpu_data *data;
	uint32_t mem_phandle = 0;
	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;

	/*
	 * PCI config space does not tell us about NVLink presence, but
	 * the platform does; ask it.
	 */
	npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
	if (!npu_dev)
		return -ENODEV;

	npu_node = pci_device_to_OF_node(npu_dev);
	if (!npu_node)
		return -EINVAL;

	if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
		return -EINVAL;

	mem_node = of_find_node_by_phandle(mem_phandle);
	if (!mem_node)
		return -EINVAL;

	if (of_property_read_variable_u64_array(mem_node, "reg", reg,
				ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
			ARRAY_SIZE(reg))
		return -EINVAL;

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->gpu_hpa = reg[0];
	data->gpu_tgt = tgt;
	data->size = reg[1];

	dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
			data->gpu_hpa + data->size - 1);

	data->gpdev = vdev->pdev;
	data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;

	ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&events, &data->group_notifier);
	if (ret)
		goto free_exit;

	/*
	 * We have just set KVM, we do not need the listener anymore.
	 * Also, keeping it registered means that if more than one GPU is
	 * assigned, we will get several similar notifiers notifying about
	 * the same device again which does not help with anything.
	 */
	vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
			&data->group_notifier);

	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
			&vfio_pci_nvgpu_regops,
			data->size,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;
free_exit:
	kfree(data);

	return ret;
}

/*
 * IBM NPU2 bridge (the NVLink2 host bridge on POWER9): expose an ATSD
 * (Address Translation Shootdown) register as a VFIO region.
 */
struct vfio_pci_npu2_data {
	void *base;		/* ATSD register virtual address, for emulated access */
	unsigned long mmio_atsd;	/* ATSD physical address */
	unsigned long gpu_tgt;		/* TGT address of corresponding GPU RAM */
	unsigned int link_speed;	/* The link speed from DT's ibm,nvlink-speed */
};

static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
		char __user *buf, size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct vfio_pci_npu2_data *data = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	if (iswrite) {
		if (copy_from_user(data->base + pos, buf, count))
			return -EFAULT;
	} else {
		if (copy_to_user(buf, data->base + pos, count))
			return -EFAULT;
	}
	*ppos += count;

	return count;
}

static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vm_area_struct *vma)
{
	int ret;
	struct vfio_pci_npu2_data *data = region->data;
	unsigned long req_len = vma->vm_end - vma->vm_start;

	if (req_len != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags |= VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
			req_len, vma->vm_page_prot);
	trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
			vma->vm_end - vma->vm_start, ret);

	return ret;
}

static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region)
{
	struct vfio_pci_npu2_data *data = region->data;

	memunmap(data->base);
	kfree(data);
}

static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
		struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
	struct vfio_pci_npu2_data *data = region->data;
	struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
		.header.version = 1,
		.tgt = data->gpu_tgt
	};
	struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
		.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
		.header.version = 1,
		.link_speed = data->link_speed
	};
	int ret;

	ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
	if (ret)
		return ret;

	return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
}

static const struct vfio_pci_regops vfio_pci_npu2_regops = {
	.rw = vfio_pci_npu2_rw,
	.mmap = vfio_pci_npu2_mmap,
	.release = vfio_pci_npu2_release,
	.add_capability = vfio_pci_npu2_add_capability,
};
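
/*
 * A hedged userspace sketch, not part of this driver: walking a region's
 * capability chain to find the caps added by vfio_pci_npu2_add_capability().
 * "info" is assumed to be a vfio_region_info re-queried with an argsz large
 * enough to include the capability chain:
 *
 *	struct vfio_info_cap_header *hdr;
 *	__u32 off = info->cap_offset;
 *
 *	while (off) {
 *		hdr = (struct vfio_info_cap_header *)((char *)info + off);
 *		if (hdr->id == VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD) {
 *			struct vfio_region_info_cap_nvlink2_lnkspd *spd =
 *				(void *) hdr;
 *			printf("link speed: %u\n", spd->link_speed);
 *		}
 *		off = hdr->next;
 *	}
 */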

int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
{
	int ret;
	struct vfio_pci_npu2_data *data;
	struct device_node *nvlink_dn;
	u32 nvlink_index = 0;
	struct pci_dev *npdev = vdev->pdev;
	struct device_node *npu_node = pci_device_to_OF_node(npdev);
	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
	u64 mmio_atsd = 0;
	u64 tgt = 0;
	u32 link_speed = 0xff;

	/*
	 * PCI config space does not tell us about NVLink presence either;
	 * only proceed if the platform says there is a GPU behind this
	 * NVLink2 bridge.
	 */
	if (!pnv_pci_get_gpu_dev(vdev->pdev))
		return -ENODEV;

	/*
	 * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
	 * so we can allocate one register per link, using the nvlink index
	 * as a key.
	 * There is always at least one ATSD register so as long as at least
	 * NVLink bridge #0 is passed to the guest, ATSD will be available.
	 */
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
			&nvlink_index)))
		return -ENODEV;

	if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
			&mmio_atsd)) {
		dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
		mmio_atsd = 0;
	}

	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
		dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
		return -EFAULT;
	}

	if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
		dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
		return -EFAULT;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->mmio_atsd = mmio_atsd;
	data->gpu_tgt = tgt;
	data->link_speed = link_speed;
	if (data->mmio_atsd) {
		data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
		if (!data->base) {
			ret = -ENOMEM;
			goto free_exit;
		}
	}

	/*
	 * Expose the capability even if this specific NVLink bridge did not
	 * get its own ATSD register: capabilities belong to VFIO regions,
	 * and normally there will be an ATSD register assigned to the
	 * NVLink bridge anyway.
	 */
	ret = vfio_pci_register_dev_region(vdev,
			PCI_VENDOR_ID_IBM |
			VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
			&vfio_pci_npu2_regops,
			data->mmio_atsd ? PAGE_SIZE : 0,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_MMAP,
			data);
	if (ret)
		goto free_exit;

	return 0;

free_exit:
	if (data->base)
		memunmap(data->base);
	kfree(data);

	return ret;
}