/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"

/* Pointers to the HGCM call parameters that follow the ioctl header. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

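/*
 * The two macros above assume the ioctl buffer layout used throughout this
 * file: a struct vbg_ioctl_hgcm_call header immediately followed in memory
 * by parm_count function parameters. A minimal illustrative sketch
 * (hypothetical buffer 'buf' and helper 'do_something', not driver code):
 *
 *	struct vbg_ioctl_hgcm_call *call = buf;
 *	struct vmmdev_hgcm_function_parameter *parm =
 *		VBG_IOCTL_HGCM_CALL_PARMS(call);
 *	for (i = 0; i < call->parm_count; i++)
 *		do_something(&parm[i]);
 */
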
#define GUEST_MAPPINGS_TRIES	5

#define VBG_KERNEL_REQUEST \
	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)

/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the contexts it
 * operates in, hence the multiple tries.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The host reports a size of zero if it has nothing it wants to map,
	 * for instance when running in VT-x/AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the host might not accept some addresses because
	 * of address clashes between the contexts it operates in.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap. */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}

/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}

/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
			     VBG_KERNEL_REQUEST);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
			     VBG_KERNEL_REQUEST);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	req2->additions_features =
		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * Newer hosts want GUEST_INFO2 reported before GUEST_INFO, so try
	 * that ordering first. Older hosts reject GUEST_INFO2 with
	 * VERR_NOT_SUPPORTED / VERR_NOT_IMPLEMENTED; in that case fall back
	 * to sending GUEST_INFO first and treat a missing GUEST_INFO2
	 * implementation as success.
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}

/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:		Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
	req->flags = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Inflate the balloon by one chunk. Called from vbg_balloon_work().
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			      sizeof(*pages),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}

/**
 * Deflate the balloon by one chunk. Called from vbg_balloon_work().
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}

/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* Inflate. */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* Deflate. */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}

/**
 * Callback for heartbeat timer; sends a heartbeat and re-arms itself.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
}

/**
 * Configure the host to check the guest's heartbeat
 * and get the heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:		Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
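
/*
 * Illustrative arithmetic for the conversion above (assumed example values,
 * not driver code): the host reports the heartbeat interval in nanoseconds,
 * so a reported interval_ns of 2000000000 becomes 2000000000 / 1000000 =
 * 2000 ms, and vbg_heartbeat_timer() then re-arms itself roughly every two
 * seconds.
 */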

/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT,
					VBG_KERNEL_REQUEST);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}

/**
 * Cleanup heartbeat code: stop the timer and disable host heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}

/**
 * Applies a change to the bit usage tracker.
 * Return: true if the global mask changed, false if not.
 * @tracker:		The bit usage tracker.
 * @changed:		The bits to change.
 * @previous:		The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}
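
/*
 * A small worked example of the tracker above (hypothetical values, not
 * driver code): if two sessions have bit 0 set, per_bit_usage[0] == 2 and
 * bit 0 is set in tracker->mask. When one session clears it,
 * vbg_track_bit_usage(tracker, BIT(0), BIT(0)) drops the count to 1 and
 * returns false (the global mask is unchanged); when the second session
 * clears it too, the count reaches 0, bit 0 is removed from tracker->mask
 * and the function returns true, telling the caller to update the host.
 */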

/**
 * Init and termination helper for resetting the event filter on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate the request up front; when the session is being
	 * terminated the requestor is the kernel, as we're cleaning up
	 * on its behalf.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * Init and termination helper that clears the guest capabilities on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate the request up front; when the session is being
	 * terminated the requestor is the kernel, as we're cleaning up
	 * on its behalf.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;

	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * vbg_query_host_version - get the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}

/**
 * Initializes the VBoxGuest device extension when the device driver is
 * loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves the MMIO
 * and I/O port ranges; this function takes care of the rest of the
 * initialization. Upon successful return the native code should set up the
 * interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a host update */
	gdev->guest_caps_host = U32_MAX;	/* forces a host update */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
			      VBG_KERNEL_REQUEST);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON,
			      VBG_KERNEL_REQUEST);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2,
			      VBG_KERNEL_REQUEST);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
			      VBG_KERNEL_REQUEST);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS,
			      VBG_KERNEL_REQUEST);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	/* All done! */
	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}

/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}

/**
 * Creates a VBoxGuest user session.
 *
 * The Linux char-device code calls this when userspace opens the device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @requestor:		VMMDEV_REQUESTOR_* flags
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->requestor = requestor;

	return session;
}
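
/*
 * A minimal sketch of how a character-device open handler might use the
 * session helpers above (hypothetical glue code, not part of this file; it
 * assumes a 'gdev' pointer is available and that a VMMDEV_REQUESTOR_* value,
 * e.g. VMMDEV_REQUESTOR_USR_USER, has been derived for the caller):
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		struct vbg_session *session;
 *
 *		session = vbg_core_open_session(gdev, VMMDEV_REQUESTOR_USR_USER);
 *		if (IS_ERR(session))
 *			return PTR_ERR(session);
 *		filp->private_data = session;
 *		return 0;
 *	}
 *
 * The matching release handler would then call
 * vbg_core_close_session(filp->private_data).
 */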

/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		/* requestor is kernel here, as we're cleaning up. */
		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
				    session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}

static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}
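
/*
 * Every ioctl payload starts with a struct vbg_ioctl_hdr, so the sizes
 * checked above are always sizeof(*hdr) plus the size of the
 * direction-specific union member. For example, for
 * VBG_IOCTL_DRIVER_VERSION_INFO below the check amounts to (sketch only):
 *
 *	hdr->size_in  == sizeof(struct vbg_ioctl_hdr) + sizeof(info->u.in)
 *	hdr->size_out == sizeof(struct vbg_ioctl_hdr) + sizeof(info->u.out)
 */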

static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}

static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}

/* Must be called with the event_spinlock held. */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}

static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}
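
/*
 * Illustrative userspace call sequence for the wait ioctl above (a rough
 * sketch only, assuming an open /dev/vboxguest fd and the uapi vboxguest
 * headers; the event constant is just an example):
 *
 *	struct vbg_ioctl_wait_for_events wait = {};
 *
 *	wait.hdr.size_in  = sizeof(wait.hdr) + sizeof(wait.u.in);
 *	wait.hdr.size_out = sizeof(wait.hdr) + sizeof(wait.u.out);
 *	wait.u.in.timeout_ms = 1000;
 *	wait.u.in.events = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
 *	ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &wait);
 *
 * On success wait.u.out.events holds the consumed event bits.
 */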

static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}

/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters. */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only &&
	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}

static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}

static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the client id array and claim it. */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
			       &client_id, &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
				  &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	case VMMDEV_HGCM_PARM_TYPE_32BIT:
	case VMMDEV_HGCM_PARM_TYPE_64BIT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		return true;
	default:
		return false;
	}
}

static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/* Validate parameter types. */
	if (f32bit) {
		struct vmmdev_hgcm_function_parameter32 *parm =
			VBG_IOCTL_HGCM_CALL_PARMS32(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	} else {
		struct vmmdev_hgcm_function_parameter *parm =
			VBG_IOCTL_HGCM_CALL_PARMS(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	}

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}

static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}

static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}

static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}

static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events
	 * entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}

static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
			    session->requestor);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}

/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and the hdr->size_in / hdr->size_out minimum sizes
	 * are already checked by the caller.
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT. */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, session, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
#endif
		/* Fall through */
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}

/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:		The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/* Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}