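/*
 * vboxguest utility functions: VMMDev guest request allocation and
 * submission, plus the HGCM connect / disconnect / call helpers built
 * on top of them.
 */
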
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get a pointer to the first HGCM parameter in a HGCM call request */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* Max parameter buffer size for a user-space originating HGCM request */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* Max parameter buffer size for a kernel originating HGCM request */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

/* I/O port to which log messages are written for host-side debugging */
#define VBG_DEBUG_PORT			0x504

/* Protects vbg_log_buf and serializes writes to VBG_DEBUG_PORT */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

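/*
 * VBG_LOG() defines a printf-like logging function which sends each
 * formatted message both to the kernel log (via pr_func) and, byte by
 * byte, to the VirtualBox debug I/O port.
 */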
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
	unsigned long flags; \
	va_list args; \
	int i, count; \
	\
	va_start(args, fmt); \
	spin_lock_irqsave(&vbg_log_lock, flags); \
	\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++) \
		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
	\
	pr_func("%s", vbg_log_buf); \
	\
	spin_unlock_irqrestore(&vbg_log_lock, flags); \
	va_end(args); \
} \
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif

void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
		    u32 requestor)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	/*
	 * Requests are passed to the host as a 32-bit physical address
	 * written to an I/O port, so the buffer must live below 4G.
	 */
	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->requestor = requestor;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}

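/*
 * Pass a request to the host via the VMMDev request port and return the
 * VBox status code the host has set in the request header.
 */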
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host modifies the request as a side effect of the outl,
	 * make the compiler aware that the memory may have changed.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT, requestor);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	/* The host may process the connect request asynchronously */
	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
			u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT,
					requestor);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);

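/*
 * Return the number of pages spanned by the buffer, taking the offset
 * of the buffer into its first page into account.
 */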
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}

/* Add the extra request space needed for buf's page list to *extra */
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}

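/**
 * hgcm_call_preprocess - Preprocess an HGCM call: validate the parameters,
 *	allocate bounce buffers for user-space buffers and figure out how
 *	much extra request space is needed for page lists.
 * @src_parm:        Pointer to the source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bounce-buffer array.
 * @extra:           Incremented by the extra request space needed for
 *                   physical page lists.
 *
 * Return: 0 or a negative errno value.
 */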
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

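/**
 * hgcm_call_linear_addr_type_to_pagelist_flags - Translate a linear address
 *	parameter type to the matching page list direction flags.
 * @type: The linear address parameter type.
 *
 * Return: page list flags.
 */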
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

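/*
 * Initialize a linear address parameter in the destination call request,
 * converting it to a physical page list describing the buffer's pages.
 */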
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

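/**
 * hgcm_call_init_call - Initialize the call request that we're sending
 *	to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to the source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 */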
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}

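/**
 * hgcm_cancel_call - Try to cancel a pending HGCM call.
 *
 * Return: VBox status code.
 */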
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, which should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	/* Fall back to the old cancel request for older hosts */
	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

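/**
 * vbg_hgcm_do_call - Perform an HGCM call and wait for its completion.
 * @gdev:       The VBoxGuest device extension.
 * @call:       The call to execute.
 * @timeout_ms: Timeout in ms, U32_MAX means wait forever.
 * @leak_it:    Set to true when the request must be leaked because
 *              cancelling a timed-out / interrupted call failed and the
 *              host may still write to the request's memory.
 *
 * Return: 0 or a negative errno value.
 */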
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion of the call, so just wait for the
	 * call to complete here.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We're screwed. */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}

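/**
 * hgcm_call_copy_back_result - Copy the result of the call back to the
 *	caller's parameter structure and user-space buffers.
 * @call:        The HGCM call request that was performed.
 * @dst_parm:    Pointer to the function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 *
 * Return: 0 or a negative errno value.
 */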
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also
	 * increases size with the extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);

#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
	u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/*
	 * Copy the results back. Plain indexing is used, not pointer
	 * increments, so parm64 still points at the start of the
	 * allocation for the kfree() below.
	 */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

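/*
 * Translation table mapping VBox status codes (negated, used as the
 * array index) to standard negative errno values.
 */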
static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED] = -EPERM,
	[-VERR_FILE_NOT_FOUND] = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
	[-VERR_INTERRUPTED] = -EINTR,
	[-VERR_DEV_IO_ERROR] = -EIO,
	[-VERR_TOO_MUCH_DATA] = -E2BIG,
	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
	[-VERR_INVALID_HANDLE] = -EBADF,
	[-VERR_TRY_AGAIN] = -EAGAIN,
	[-VERR_NO_MEMORY] = -ENOMEM,
	[-VERR_INVALID_POINTER] = -EFAULT,
	[-VERR_RESOURCE_BUSY] = -EBUSY,
	[-VERR_ALREADY_EXISTS] = -EEXIST,
	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
	[-VERR_INVALID_NAME] = -ENOENT,
	[-VERR_IS_A_DIRECTORY] = -EISDIR,
	[-VERR_INVALID_PARAMETER] = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
	[-VERR_INVALID_FUNCTION] = -ENOTTY,
	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
	[-VERR_FILE_TOO_BIG] = -EFBIG,
	[-VERR_DISK_FULL] = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
	[-VERR_WRITE_PROTECT] = -EROFS,
	[-VERR_BROKEN_PIPE] = -EPIPE,
	[-VERR_DEADLOCK] = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
	[-VERR_NOT_SUPPORTED] = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
	[-VERR_NO_MORE_FILES] = -ENODATA,
	[-VERR_NO_DATA] = -ENODATA,
	[-VERR_NET_NO_NETWORK] = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION] = -EILSEQ,
	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN] = -ENETDOWN,
	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
	[-VERR_TIMEOUT] = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);