// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}
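
/*
 * Worked example (illustrative, not from the original source): with
 * vring.num == 8, *last_avail_idx == 5 and avail->idx == 7, two entries
 * are pending; the head is read from avail->ring[5 & 7] and
 * *last_avail_idx advances to 6.  Once *last_avail_idx catches up with
 * avail->idx, the ring is empty and the function returns vring.num (8).
 */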

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
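
/*
 * Worked example (illustrative, not from the original source): for a
 * range covering [0x1000, 0x1fff] (end_incl == 0x1fff), a request for
 * 0x300 bytes at addr 0x1f00 overruns the range, so range_check()
 * truncates *len to 0x1fff + 1 - 0x1f00 = 0x100 and returns true; the
 * caller notices the shortened length and loops to map the remainder
 * against the next range.
 */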

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	/* struct iovec and struct kvec are asserted layout-compatible
	 * (see the BUILD_BUG_ONs in vringh_getdesc_user()). */
	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc_array(iov->iov, new_num,
				     sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

/* Copy a descriptor that may straddle range boundaries, piece by piece. */
static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing the outer descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		/* Range truncated the descriptor?  Continue with the rest. */
		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}

/* Returns 1 if the other side must be notified, 0 if not, or -errno. */
static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}
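
/*
 * Worked example (illustrative, not from the original source): with
 * last_used_idx == 10 and completed == 3, the new index is 13.
 * vring_need_event() notifies iff the guest's used_event lies in the
 * window we just crossed: used_event == 11 gives
 * (u16)(13 - 11 - 1) = 1 < (u16)(13 - 10) = 3, so we notify;
 * used_event == 14 gives 65534 < 3, which is false, so we don't.
 */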

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* True means the ring is still empty and the caller may sleep;
	 * false means new buffers slipped in, so go around again. */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: the vring pointers are really __user. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor array.
 * @avail: the userspace avail ring.
 * @used: the userspace used ring.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 on success, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be the identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
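
/*
 * Usage sketch (illustrative only; the device logic, req/reply buffers
 * and the my_getrange() callback below are hypothetical placeholders):
 *
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *
 *	for (;;) {
 *		err = vringh_getdesc_user(vrh, &riov, &wiov,
 *					  my_getrange, &head);
 *		if (err <= 0)
 *			break;	// 0: ring empty, <0: error
 *		vringh_iov_pull_user(&riov, req, sizeof(req));
 *		vringh_iov_push_user(&wiov, reply, reply_len);
 *		vringh_complete_user(vrh, head, reply_len);
 *	}
 *	if (vringh_need_notify_user(vrh) > 0)
 *		;	// signal the guest, e.g. via an eventfd
 */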

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor array.
 * @avail: the kernelspace avail ring.
 * @used: the kernelspace used ring.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 on success, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
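
/*
 * Usage sketch (illustrative only; buf and its handling are hypothetical):
 *
 *	struct vringh_kiov riov;
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	err = vringh_getdesc_kern(vrh, &riov, NULL, &head, GFP_KERNEL);
 *	if (err == 1) {
 *		ssize_t got = vringh_iov_pull_kern(&riov, buf, sizeof(buf));
 *		if (got >= 0)
 *			vringh_complete_kern(vrh, head, 0);
 *	}
 *	if (vringh_need_notify_kern(vrh) > 0)
 *		;	// kick the other side
 */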

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back.
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

/* Translate an address range through the IOTLB into bio_vec segments. */
static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor array (addresses are translated via the IOTLB).
 * @avail: the avail ring (addresses are translated via the IOTLB).
 * @used: the used ring (addresses are translated via the IOTLB).
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
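
/*
 * Setup sketch (illustrative only; the iotlb allocation parameters are
 * hypothetical):
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *
 *	vringh_init_iotlb(vrh, features, num, false, desc, avail, used);
 *	vringh_set_iotlb(vrh, iotlb);
 *	// ...then use vringh_getdesc_iotlb()/vringh_complete_iotlb() as
 *	// with the kernelspace API; every access goes through the iotlb.
 */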

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 on success, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back.
 *
 * The next vringh_get_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

#endif

MODULE_LICENSE("GPL");