// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

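/* Complain about a broken vring, rate-limited so a misbehaving guest
 * can't flood the kernel log. */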
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

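/* Returns vring->num if empty, -ve on error. */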
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

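/**
 * vringh_kiov_advance - skip bytes from vring_kiov
 * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
 * @len: the maximum length to advance
 */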
void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
{
	while (len && iov->i < iov->used) {
		size_t partlen = min(iov->iov[iov->i].iov_len, len);

		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}

		len -= partlen;
	}
}
EXPORT_SYMBOL(vringh_kiov_advance);

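/* Copy some bytes to/from the iovec.  Returns num copied. */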
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;

		vringh_kiov_advance(iov, partlen);
	}
	return done;
}

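/* May reduce *len if range is shorter. */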
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

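/* Switch traversal to an indirect descriptor table. */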
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

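/* Grow the iovec array, preserving the VRINGH_IOV_ALLOCATED flag in max_num. */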
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc_array(iov->iov, new_num,
				     sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

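/* Walk the descriptor chain starting at i, filling in riov/wiov as we go. */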
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = riov->consumed = 0;
	if (wiov)
		wiov->i = wiov->used = wiov->consumed = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->used)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}

static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

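/* Userspace access helpers: in the kernel, addresses are really userspace. */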
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

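/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */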
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

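/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 */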
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be the identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

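/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */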
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

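/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */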
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

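/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back.
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */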
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

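/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */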
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

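/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */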
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

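/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */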
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

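/**
 * vringh_notify_disable_user - don't want to know if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */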
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

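/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */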
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);

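/* Kernelspace access helpers. */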
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

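/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */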
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

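/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 */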
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

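/**
 * vringh_iov_pull_kern - copy bytes from vring_kiov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */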
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

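/**
 * vringh_iov_push_kern - copy bytes into vring_kiov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */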
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

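/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back.
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */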
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

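/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */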
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

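/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */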
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

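/**
 * vringh_notify_disable_kern - don't want to know if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */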
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

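/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */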
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

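/* Translate a guest address range into bio_vecs via the IOTLB; returns
 * the number of bio_vecs filled in, or a negative errno. */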
static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	spin_lock(vrh->iotlb_lock);

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	spin_unlock(vrh->iotlb_lock);

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
	if (ret != size)
		return -EFAULT;

	return 0;
}

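/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor pointer.
 * @avail: the avail pointer.
 * @used: the used pointer.
 *
 * Returns an error if num is invalid.
 */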
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

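/**
 * vringh_set_iotlb - associate an IOTLB with this vringh.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 * @iotlb_lock: spinlock to synchronize the iotlb accesses
 */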
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
		      spinlock_t *iotlb_lock)
{
	vrh->iotlb = iotlb;
	vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);

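/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 */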
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

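/**
 * vringh_iov_pull_iotlb - copy bytes from vring_kiov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */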
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

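/**
 * vringh_iov_push_iotlb - copy bytes into vring_kiov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */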
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

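/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back.
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */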
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

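/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */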
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

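/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */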
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

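/**
 * vringh_notify_disable_iotlb - don't want to know if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */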
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

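/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */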
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

#endif

MODULE_LICENSE("GPL");