linux/drivers/virt/vboxguest/vboxguest_core.c
   1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
   2/*
   3 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
   4 *
   5 * Copyright (C) 2007-2016 Oracle Corporation
   6 */
   7
   8#include <linux/device.h>
   9#include <linux/mm.h>
  10#include <linux/sched.h>
  11#include <linux/sizes.h>
  12#include <linux/slab.h>
  13#include <linux/vbox_err.h>
  14#include <linux/vbox_utils.h>
  15#include <linux/vmalloc.h>
  16#include "vboxguest_core.h"
  17#include "vboxguest_version.h"
  18
  19/* Get the pointer to the first HGCM parameter. */
  20#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
  21        ((struct vmmdev_hgcm_function_parameter *)( \
  22                (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
  23/* Get the pointer to the first HGCM parameter in a 32-bit request. */
  24#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
  25        ((struct vmmdev_hgcm_function_parameter32 *)( \
  26                (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
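    /*
     * Both helpers assume the VBG_IOCTL_HGCM_CALL buffer layout: the
     * struct vbg_ioctl_hgcm_call header immediately followed by parm_count
     * parameter structs, i.e. the parameters start exactly
     * sizeof(struct vbg_ioctl_hgcm_call) bytes into the buffer.
     */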
  27
  28#define GUEST_MAPPINGS_TRIES    5
  29
  30/**
  31 * Reserves memory in which the VMM can relocate any guest mappings
  32 * that are floating around.
  33 *
  34 * This operation is a little bit tricky since the VMM might not accept
  35 * just any address because of address clashes between the three contexts
  36 * it operates in, so we try several times.
  37 *
  38 * Failure to reserve the guest mappings is ignored.
  39 *
  40 * @gdev:               The Guest extension device.
  41 */
  42static void vbg_guest_mappings_init(struct vbg_dev *gdev)
  43{
  44        struct vmmdev_hypervisorinfo *req;
  45        void *guest_mappings[GUEST_MAPPINGS_TRIES];
  46        struct page **pages = NULL;
  47        u32 size, hypervisor_size;
  48        int i, rc;
  49
  50        /* Query the required space. */
  51        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
  52        if (!req)
  53                return;
  54
  55        req->hypervisor_start = 0;
  56        req->hypervisor_size = 0;
  57        rc = vbg_req_perform(gdev, req);
  58        if (rc < 0)
  59                goto out;
  60
  61        /*
  62         * The VMM will report back if there is nothing it wants to map, like
  63         * for instance in VT-x and AMD-V mode.
  64         */
  65        if (req->hypervisor_size == 0)
  66                goto out;
  67
  68        hypervisor_size = req->hypervisor_size;
  69        /* Add 4 MiB so that we can align the vmap to 4 MiB as the host requires. */
  70        size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
  71
  72        pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
  73        if (!pages)
  74                goto out;
  75
  76        gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
  77        if (!gdev->guest_mappings_dummy_page)
  78                goto out;
  79
  80        for (i = 0; i < (size >> PAGE_SHIFT); i++)
  81                pages[i] = gdev->guest_mappings_dummy_page;
  82
  83        /*
  84         * Try several times, the VMM might not accept some addresses because
  85         * of address clashes between the three contexts.
  86         */
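            /*
             * Mappings from rejected attempts are deliberately kept until
             * after the loop, presumably so each retry gets a different
             * virtual address; they are unmapped below.
             */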
  87        for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
  88                guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
  89                                         VM_MAP, PAGE_KERNEL_RO);
  90                if (!guest_mappings[i])
  91                        break;
  92
  93                req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
  94                req->header.rc = VERR_INTERNAL_ERROR;
  95                req->hypervisor_size = hypervisor_size;
  96                req->hypervisor_start =
  97                        (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
  98
  99                rc = vbg_req_perform(gdev, req);
 100                if (rc >= 0) {
 101                        gdev->guest_mappings = guest_mappings[i];
 102                        break;
 103                }
 104        }
 105
 106        /* Free vmaps from failed attempts. */
 107        while (--i >= 0)
 108                vunmap(guest_mappings[i]);
 109
 110        /* On failure free the dummy-page backing the vmap */
 111        if (!gdev->guest_mappings) {
 112                __free_page(gdev->guest_mappings_dummy_page);
 113                gdev->guest_mappings_dummy_page = NULL;
 114        }
 115
 116out:
 117        vbg_req_free(req, sizeof(*req));
 118        kfree(pages);
 119}
 120
 121/**
 122 * Undo what vbg_guest_mappings_init did.
 123 *
 124 * @gdev:               The Guest extension device.
 125 */
 126static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
 127{
 128        struct vmmdev_hypervisorinfo *req;
 129        int rc;
 130
 131        if (!gdev->guest_mappings)
 132                return;
 133
 134        /*
 135         * Tell the host that we're going to free the memory we reserved for
 136         * it, then free it up. (Leak the memory if anything goes wrong here.)
 137         */
 138        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
 139        if (!req)
 140                return;
 141
 142        req->hypervisor_start = 0;
 143        req->hypervisor_size = 0;
 144
 145        rc = vbg_req_perform(gdev, req);
 146
 147        vbg_req_free(req, sizeof(*req));
 148
 149        if (rc < 0) {
 150                vbg_err("%s error: %d\n", __func__, rc);
 151                return;
 152        }
 153
 154        vunmap(gdev->guest_mappings);
 155        gdev->guest_mappings = NULL;
 156
 157        __free_page(gdev->guest_mappings_dummy_page);
 158        gdev->guest_mappings_dummy_page = NULL;
 159}
 160
 161/**
 162 * Report the guest information to the host.
 163 * Return: 0 or negative errno value.
 164 * @gdev:               The Guest extension device.
 165 */
 166static int vbg_report_guest_info(struct vbg_dev *gdev)
 167{
 168        /*
 169         * Allocate and fill in the two guest info reports.
 170         */
 171        struct vmmdev_guest_info *req1 = NULL;
 172        struct vmmdev_guest_info2 *req2 = NULL;
 173        int rc, ret = -ENOMEM;
 174
 175        req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
 176        req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
 177        if (!req1 || !req2)
 178                goto out_free;
 179
 180        req1->interface_version = VMMDEV_VERSION;
 181        req1->os_type = VMMDEV_OSTYPE_LINUX26;
 182#if __BITS_PER_LONG == 64
 183        req1->os_type |= VMMDEV_OSTYPE_X64;
 184#endif
 185
 186        req2->additions_major = VBG_VERSION_MAJOR;
 187        req2->additions_minor = VBG_VERSION_MINOR;
 188        req2->additions_build = VBG_VERSION_BUILD;
 189        req2->additions_revision = VBG_SVN_REV;
 190        /* (no features defined yet) */
 191        req2->additions_features = 0;
 192        strlcpy(req2->name, VBG_VERSION_STRING,
 193                sizeof(req2->name));
 194
 195        /*
 196         * There are two protocols here:
 197         *      1. INFO2 + INFO1. Supported by >=3.2.51.
 198         *      2. INFO1 and optionally INFO2. The old protocol.
 199         *
 200         * We try protocol 2 first.  It will fail with VERR_NOT_SUPPORTED
 201         * if not supported by the VMMDev (message ordering requirement).
 202         */
 203        rc = vbg_req_perform(gdev, req2);
 204        if (rc >= 0) {
 205                rc = vbg_req_perform(gdev, req1);
 206        } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
 207                rc = vbg_req_perform(gdev, req1);
 208                if (rc >= 0) {
 209                        rc = vbg_req_perform(gdev, req2);
 210                        if (rc == VERR_NOT_IMPLEMENTED)
 211                                rc = VINF_SUCCESS;
 212                }
 213        }
 214        ret = vbg_status_code_to_errno(rc);
 215
 216out_free:
 217        vbg_req_free(req2, sizeof(*req2));
 218        vbg_req_free(req1, sizeof(*req1));
 219        return ret;
 220}
 221
 222/**
 223 * Report the guest driver status to the host.
 224 * Return: 0 or negative errno value.
 225 * @gdev:               The Guest extension device.
 226 * @active:             Flag whether the driver is now active or not.
 227 */
 228static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
 229{
 230        struct vmmdev_guest_status *req;
 231        int rc;
 232
 233        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
 234        if (!req)
 235                return -ENOMEM;
 236
 237        req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
 238        if (active)
 239                req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
 240        else
 241                req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
 242        req->flags = 0;
 243
 244        rc = vbg_req_perform(gdev, req);
 245        if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
 246                rc = VINF_SUCCESS;
 247
 248        vbg_req_free(req, sizeof(*req));
 249
 250        return vbg_status_code_to_errno(rc);
 251}
 252
 253/**
 254 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 255 * Return: 0 or negative errno value.
 256 * @gdev:               The Guest extension device.
 257 * @chunk_idx:          Index of the chunk.
 258 */
 259static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
 260{
 261        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
 262        struct page **pages;
 263        int i, rc, ret;
 264
 265        pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
 266                              sizeof(*pages),
 267                              GFP_KERNEL | __GFP_NOWARN);
 268        if (!pages)
 269                return -ENOMEM;
 270
 271        req->header.size = sizeof(*req);
 272        req->inflate = true;
 273        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
 274
 275        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
 276                pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
 277                if (!pages[i]) {
 278                        ret = -ENOMEM;
 279                        goto out_error;
 280                }
 281
 282                req->phys_page[i] = page_to_phys(pages[i]);
 283        }
 284
 285        rc = vbg_req_perform(gdev, req);
 286        if (rc < 0) {
 287                vbg_err("%s error, rc: %d\n", __func__, rc);
 288                ret = vbg_status_code_to_errno(rc);
 289                goto out_error;
 290        }
 291
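            /* Success: the balloon owns the pages now, deflate frees them. */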
 292        gdev->mem_balloon.pages[chunk_idx] = pages;
 293
 294        return 0;
 295
 296out_error:
 297        while (--i >= 0)
 298                __free_page(pages[i]);
 299        kfree(pages);
 300
 301        return ret;
 302}
 303
 304/**
 305 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 306 * Return: 0 or negative errno value.
 307 * @gdev:               The Guest extension device.
 308 * @chunk_idx:          Index of the chunk.
 309 */
 310static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
 311{
 312        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
 313        struct page **pages = gdev->mem_balloon.pages[chunk_idx];
 314        int i, rc;
 315
 316        req->header.size = sizeof(*req);
 317        req->inflate = false;
 318        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
 319
 320        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
 321                req->phys_page[i] = page_to_phys(pages[i]);
 322
 323        rc = vbg_req_perform(gdev, req);
 324        if (rc < 0) {
 325                vbg_err("%s error, rc: %d\n", __func__, rc);
 326                return vbg_status_code_to_errno(rc);
 327        }
 328
 329        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
 330                __free_page(pages[i]);
 331        kfree(pages);
 332        gdev->mem_balloon.pages[chunk_idx] = NULL;
 333
 334        return 0;
 335}
 336
 337/**
 338 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 339 * the host wants the balloon to be and adjust accordingly.
 340 */
 341static void vbg_balloon_work(struct work_struct *work)
 342{
 343        struct vbg_dev *gdev =
 344                container_of(work, struct vbg_dev, mem_balloon.work);
 345        struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
 346        u32 i, chunks;
 347        int rc, ret;
 348
 349        /*
 350         * Setting this bit means that we request the value from the host and
 351         * change the guest memory balloon according to the returned value.
 352         */
 353        req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
 354        rc = vbg_req_perform(gdev, req);
 355        if (rc < 0) {
 356                vbg_err("%s error, rc: %d\n", __func__, rc);
 357                return;
 358        }
 359
 360        /*
 361         * The host always returns the same maximum number of chunks, so
 362         * we do this once.
 363         */
 364        if (!gdev->mem_balloon.max_chunks) {
 365                gdev->mem_balloon.pages =
 366                        devm_kcalloc(gdev->dev, req->phys_mem_chunks,
 367                                     sizeof(struct page **), GFP_KERNEL);
 368                if (!gdev->mem_balloon.pages)
 369                        return;
 370
 371                gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
 372        }
 373
 374        chunks = req->balloon_chunks;
 375        if (chunks > gdev->mem_balloon.max_chunks) {
 376                vbg_err("%s: illegal balloon size %u (max=%u)\n",
 377                        __func__, chunks, gdev->mem_balloon.max_chunks);
 378                return;
 379        }
 380
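            /*
             * Adjust one chunk at a time, updating the chunk count after
             * each successful step so partial progress is kept on error.
             */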
 381        if (chunks > gdev->mem_balloon.chunks) {
 382                /* inflate */
 383                for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
 384                        ret = vbg_balloon_inflate(gdev, i);
 385                        if (ret < 0)
 386                                return;
 387
 388                        gdev->mem_balloon.chunks++;
 389                }
 390        } else {
 391                /* deflate */
 392                for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
 393                        ret = vbg_balloon_deflate(gdev, i);
 394                        if (ret < 0)
 395                                return;
 396
 397                        gdev->mem_balloon.chunks--;
 398                }
 399        }
 400}
 401
 402/**
 403 * Callback for heartbeat timer.
 404 */
 405static void vbg_heartbeat_timer(struct timer_list *t)
 406{
 407        struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
 408
 409        vbg_req_perform(gdev, gdev->guest_heartbeat_req);
 410        mod_timer(&gdev->heartbeat_timer,
 411                  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
 412}
 413
 414/**
 415 * Configure the host to check the guest's heartbeat
 416 * and get the heartbeat interval from the host.
 417 * Return: 0 or negative errno value.
 418 * @gdev:               The Guest extension device.
 419 * @enabled:            Set true to enable guest heartbeat checks on host.
 420 */
 421static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
 422{
 423        struct vmmdev_heartbeat *req;
 424        int rc;
 425
 426        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
 427        if (!req)
 428                return -ENOMEM;
 429
 430        req->enabled = enabled;
 431        req->interval_ns = 0;
 432        rc = vbg_req_perform(gdev, req);
 433        do_div(req->interval_ns, 1000000); /* ns -> ms */
 434        gdev->heartbeat_interval_ms = req->interval_ns;
 435        vbg_req_free(req, sizeof(*req));
 436
 437        return vbg_status_code_to_errno(rc);
 438}
 439
 440/**
 441 * Initializes the heartbeat timer. This feature may be disabled by the host.
 442 * Return: 0 or negative errno value.
 443 * @gdev:               The Guest extension device.
 444 */
 445static int vbg_heartbeat_init(struct vbg_dev *gdev)
 446{
 447        int ret;
 448
 449        /* Make sure that heartbeat checking is disabled if we fail. */
 450        ret = vbg_heartbeat_host_config(gdev, false);
 451        if (ret < 0)
 452                return ret;
 453
 454        ret = vbg_heartbeat_host_config(gdev, true);
 455        if (ret < 0)
 456                return ret;
 457
 458        gdev->guest_heartbeat_req = vbg_req_alloc(
 459                                        sizeof(*gdev->guest_heartbeat_req),
 460                                        VMMDEVREQ_GUEST_HEARTBEAT);
 461        if (!gdev->guest_heartbeat_req)
 462                return -ENOMEM;
 463
 464        vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
 465                 __func__, gdev->heartbeat_interval_ms);
 466        mod_timer(&gdev->heartbeat_timer, 0);
 467
 468        return 0;
 469}
 470
 471/**
 472 * Cleanup heartbeat code: stop the timer and disable host heartbeat checking.
 473 * @gdev:               The Guest extension device.
 474 */
 475static void vbg_heartbeat_exit(struct vbg_dev *gdev)
 476{
 477        del_timer_sync(&gdev->heartbeat_timer);
 478        vbg_heartbeat_host_config(gdev, false);
 479        vbg_req_free(gdev->guest_heartbeat_req,
 480                     sizeof(*gdev->guest_heartbeat_req));
 481}
 482
 483/**
 484 * Applies a change to the bit usage tracker.
 485 * Return: true if the mask changed, false if not.
 486 * @tracker:            The bit usage tracker.
 487 * @changed:            The bits to change.
 488 * @previous:           The previous value of the bits.
 489 */
 490static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
 491                                u32 changed, u32 previous)
 492{
 493        bool global_change = false;
 494
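            /*
             * per_bit_usage[] counts how many sessions use each bit; a bit
             * stays set in tracker->mask while that count is non-zero.  The
             * return value tells the caller whether the combined mask changed.
             */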
 495        while (changed) {
 496                u32 bit = ffs(changed) - 1;
 497                u32 bitmask = BIT(bit);
 498
 499                if (bitmask & previous) {
 500                        tracker->per_bit_usage[bit] -= 1;
 501                        if (tracker->per_bit_usage[bit] == 0) {
 502                                global_change = true;
 503                                tracker->mask &= ~bitmask;
 504                        }
 505                } else {
 506                        tracker->per_bit_usage[bit] += 1;
 507                        if (tracker->per_bit_usage[bit] == 1) {
 508                                global_change = true;
 509                                tracker->mask |= bitmask;
 510                        }
 511                }
 512
 513                changed &= ~bitmask;
 514        }
 515
 516        return global_change;
 517}
 518
 519/**
 520 * Init and termination worker for resetting the event filter on the host.
 521 * Return: 0 or negative errno value.
 522 * @gdev:                  The Guest extension device.
 523 * @fixed_events:          Fixed events (init time).
 524 */
 525static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
 526                                       u32 fixed_events)
 527{
 528        struct vmmdev_mask *req;
 529        int rc;
 530
 531        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
 532        if (!req)
 533                return -ENOMEM;
 534
 535        req->not_mask = U32_MAX & ~fixed_events;
 536        req->or_mask = fixed_events;
 537        rc = vbg_req_perform(gdev, req);
 538        if (rc < 0)
 539                vbg_err("%s error, rc: %d\n", __func__, rc);
 540
 541        vbg_req_free(req, sizeof(*req));
 542        return vbg_status_code_to_errno(rc);
 543}
 544
 545/**
 546 * Changes the event filter mask for the given session.
 547 *
 548 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 549 * do session cleanup. Takes the session mutex.
 550 *
 551 * Return: 0 or negative errno value.
 552 * @gdev:                       The Guest extension device.
 553 * @session:                    The session.
 554 * @or_mask:                    The events to add.
 555 * @not_mask:                   The events to remove.
 556 * @session_termination:        Set if we're called by the session cleanup code.
 557 *                              This tweaks the error handling so we perform
 558 *                              proper session cleanup even if the host
 559 *                              misbehaves.
 560 */
 561static int vbg_set_session_event_filter(struct vbg_dev *gdev,
 562                                        struct vbg_session *session,
 563                                        u32 or_mask, u32 not_mask,
 564                                        bool session_termination)
 565{
 566        struct vmmdev_mask *req;
 567        u32 changed, previous;
 568        int rc, ret = 0;
 569
 570        /* Allocate a request buffer before taking the mutex */
 571        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
 572        if (!req) {
 573                if (!session_termination)
 574                        return -ENOMEM;
 575                /* Ignore allocation failure, we must do session cleanup. */
 576        }
 577
 578        mutex_lock(&gdev->session_mutex);
 579
 580        /* Apply the changes to the session mask. */
 581        previous = session->event_filter;
 582        session->event_filter |= or_mask;
 583        session->event_filter &= ~not_mask;
 584
 585        /* If anything actually changed, update the global usage counters. */
 586        changed = previous ^ session->event_filter;
 587        if (!changed)
 588                goto out;
 589
 590        vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
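            /*
             * The filter sent to the host is the union of the fixed events
             * and every session's filter.
             */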
 591        or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
 592
 593        if (gdev->event_filter_host == or_mask || !req)
 594                goto out;
 595
 596        gdev->event_filter_host = or_mask;
 597        req->or_mask = or_mask;
 598        req->not_mask = ~or_mask;
 599        rc = vbg_req_perform(gdev, req);
 600        if (rc < 0) {
 601                ret = vbg_status_code_to_errno(rc);
 602
 603                /* Failed, roll back (unless it's session termination time). */
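                    /*
                     * U32_MAX will not match any real mask, forcing the next
                     * filter change to be re-sent to the host.
                     */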
 604                gdev->event_filter_host = U32_MAX;
 605                if (session_termination)
 606                        goto out;
 607
 608                vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
 609                                    session->event_filter);
 610                session->event_filter = previous;
 611        }
 612
 613out:
 614        mutex_unlock(&gdev->session_mutex);
 615        vbg_req_free(req, sizeof(*req));
 616
 617        return ret;
 618}
 619
 620/**
 621 * Init and termination worker for setting guest capabilities to zero on the host.
 622 * Return: 0 or negative errno value.
 623 * @gdev:               The Guest extension device.
 624 */
 625static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 626{
 627        struct vmmdev_mask *req;
 628        int rc;
 629
 630        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
 631        if (!req)
 632                return -ENOMEM;
 633
 634        req->not_mask = U32_MAX;
 635        req->or_mask = 0;
 636        rc = vbg_req_perform(gdev, req);
 637        if (rc < 0)
 638                vbg_err("%s error, rc: %d\n", __func__, rc);
 639
 640        vbg_req_free(req, sizeof(*req));
 641        return vbg_status_code_to_errno(rc);
 642}
 643
 644/**
 645 * Sets the guest capabilities for a session. Takes the session mutex.
 646 * Return: 0 or negative errno value.
 647 * @gdev:                       The Guest extension device.
 648 * @session:                    The session.
 649 * @or_mask:                    The capabilities to add.
 650 * @not_mask:                   The capabilities to remove.
 651 * @session_termination:        Set if we're called by the session cleanup code.
 652 *                              This tweaks the error handling so we perform
 653 *                              proper session cleanup even if the host
 654 *                              misbehaves.
 655 */
 656static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 657                                        struct vbg_session *session,
 658                                        u32 or_mask, u32 not_mask,
 659                                        bool session_termination)
 660{
 661        struct vmmdev_mask *req;
 662        u32 changed, previous;
 663        int rc, ret = 0;
 664
 665        /* Allocate a request buffer before taking the mutex */
 666        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
 667        if (!req) {
 668                if (!session_termination)
 669                        return -ENOMEM;
 670                /* Ignore allocation failure, we must do session cleanup. */
 671        }
 672
 673        mutex_lock(&gdev->session_mutex);
 674
 675        /* Apply the changes to the session mask. */
 676        previous = session->guest_caps;
 677        session->guest_caps |= or_mask;
 678        session->guest_caps &= ~not_mask;
 679
 680        /* If anything actually changed, update the global usage counters. */
 681        changed = previous ^ session->guest_caps;
 682        if (!changed)
 683                goto out;
 684
 685        vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
 686        or_mask = gdev->guest_caps_tracker.mask;
 687
 688        if (gdev->guest_caps_host == or_mask || !req)
 689                goto out;
 690
 691        gdev->guest_caps_host = or_mask;
 692        req->or_mask = or_mask;
 693        req->not_mask = ~or_mask;
 694        rc = vbg_req_perform(gdev, req);
 695        if (rc < 0) {
 696                ret = vbg_status_code_to_errno(rc);
 697
 698                /* Failed, roll back (unless it's session termination time). */
 699                gdev->guest_caps_host = U32_MAX;
 700                if (session_termination)
 701                        goto out;
 702
 703                vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
 704                                    session->guest_caps);
 705                session->guest_caps = previous;
 706        }
 707
 708out:
 709        mutex_unlock(&gdev->session_mutex);
 710        vbg_req_free(req, sizeof(*req));
 711
 712        return ret;
 713}
 714
 715/**
 716 * vbg_query_host_version gets the host feature mask and version information.
 717 * Return: 0 or negative errno value.
 718 * @gdev:               The Guest extension device.
 719 */
 720static int vbg_query_host_version(struct vbg_dev *gdev)
 721{
 722        struct vmmdev_host_version *req;
 723        int rc, ret;
 724
 725        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
 726        if (!req)
 727                return -ENOMEM;
 728
 729        rc = vbg_req_perform(gdev, req);
 730        ret = vbg_status_code_to_errno(rc);
 731        if (ret) {
 732                vbg_err("%s error: %d\n", __func__, rc);
 733                goto out;
 734        }
 735
 736        snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
 737                 req->major, req->minor, req->build, req->revision);
 738        gdev->host_features = req->features;
 739
 740        vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
 741                 gdev->host_features);
 742
 743        if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
 744                vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
 745                ret = -ENODEV;
 746        }
 747
 748out:
 749        vbg_req_free(req, sizeof(*req));
 750        return ret;
 751}
 752
 753/**
 754 * Initializes the VBoxGuest device extension when the
 755 * device driver is loaded.
 756 *
 757 * The native code locates the VMMDev on the PCI bus and retrieves
 758 * the MMIO and I/O port ranges; this function will take care of
 759 * mapping the MMIO memory (if present). Upon successful return
 760 * the native code should set up the interrupt handler.
 761 *
 762 * Return: 0 or negative errno value.
 763 *
 764 * @gdev:               The Guest extension device.
 765 * @fixed_events:       Events that will be enabled upon init and no client
 766 *                      will ever be allowed to mask.
 767 */
 768int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 769{
 770        int ret = -ENOMEM;
 771
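            /*
             * HGCM events are always handled inside the driver (the ISR
             * wakes hgcm_wq), so they belong to the fixed, never-maskable
             * events.
             */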
 772        gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
 773        gdev->event_filter_host = U32_MAX;      /* forces a report */
 774        gdev->guest_caps_host = U32_MAX;        /* forces a report */
 775
 776        init_waitqueue_head(&gdev->event_wq);
 777        init_waitqueue_head(&gdev->hgcm_wq);
 778        spin_lock_init(&gdev->event_spinlock);
 779        mutex_init(&gdev->session_mutex);
 780        mutex_init(&gdev->cancel_req_mutex);
 781        timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
 782        INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
 783
 784        gdev->mem_balloon.get_req =
 785                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
 786                              VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
 787        gdev->mem_balloon.change_req =
 788                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
 789                              VMMDEVREQ_CHANGE_MEMBALLOON);
 790        gdev->cancel_req =
 791                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
 792                              VMMDEVREQ_HGCM_CANCEL2);
 793        gdev->ack_events_req =
 794                vbg_req_alloc(sizeof(*gdev->ack_events_req),
 795                              VMMDEVREQ_ACKNOWLEDGE_EVENTS);
 796        gdev->mouse_status_req =
 797                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
 798                              VMMDEVREQ_GET_MOUSE_STATUS);
 799
 800        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
 801            !gdev->cancel_req || !gdev->ack_events_req ||
 802            !gdev->mouse_status_req)
 803                goto err_free_reqs;
 804
 805        ret = vbg_query_host_version(gdev);
 806        if (ret)
 807                goto err_free_reqs;
 808
 809        ret = vbg_report_guest_info(gdev);
 810        if (ret) {
 811                vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
 812                goto err_free_reqs;
 813        }
 814
 815        ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
 816        if (ret) {
 817                vbg_err("vboxguest: Error setting fixed event filter: %d\n",
 818                        ret);
 819                goto err_free_reqs;
 820        }
 821
 822        ret = vbg_reset_host_capabilities(gdev);
 823        if (ret) {
 824                vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
 825                        ret);
 826                goto err_free_reqs;
 827        }
 828
 829        ret = vbg_core_set_mouse_status(gdev, 0);
 830        if (ret) {
 831                vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
 832                goto err_free_reqs;
 833        }
 834
 835        /* These may fail without requiring the driver init to fail. */
 836        vbg_guest_mappings_init(gdev);
 837        vbg_heartbeat_init(gdev);
 838
 839        /* All Done! */
 840        ret = vbg_report_driver_status(gdev, true);
 841        if (ret < 0)
 842                vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
 843
 844        return 0;
 845
 846err_free_reqs:
 847        vbg_req_free(gdev->mouse_status_req,
 848                     sizeof(*gdev->mouse_status_req));
 849        vbg_req_free(gdev->ack_events_req,
 850                     sizeof(*gdev->ack_events_req));
 851        vbg_req_free(gdev->cancel_req,
 852                     sizeof(*gdev->cancel_req));
 853        vbg_req_free(gdev->mem_balloon.change_req,
 854                     sizeof(*gdev->mem_balloon.change_req));
 855        vbg_req_free(gdev->mem_balloon.get_req,
 856                     sizeof(*gdev->mem_balloon.get_req));
 857        return ret;
 858}
 859
 860/**
 861 * Call this on exit to clean up vboxguest-core managed resources.
 862 *
 863 * The native code should call this before the driver is unloaded,
 864 * but don't call this on shutdown.
 865 * @gdev:               The Guest extension device.
 866 */
 867void vbg_core_exit(struct vbg_dev *gdev)
 868{
 869        vbg_heartbeat_exit(gdev);
 870        vbg_guest_mappings_exit(gdev);
 871
 872        /* Clear the host flags (mouse status etc). */
 873        vbg_reset_host_event_filter(gdev, 0);
 874        vbg_reset_host_capabilities(gdev);
 875        vbg_core_set_mouse_status(gdev, 0);
 876
 877        vbg_req_free(gdev->mouse_status_req,
 878                     sizeof(*gdev->mouse_status_req));
 879        vbg_req_free(gdev->ack_events_req,
 880                     sizeof(*gdev->ack_events_req));
 881        vbg_req_free(gdev->cancel_req,
 882                     sizeof(*gdev->cancel_req));
 883        vbg_req_free(gdev->mem_balloon.change_req,
 884                     sizeof(*gdev->mem_balloon.change_req));
 885        vbg_req_free(gdev->mem_balloon.get_req,
 886                     sizeof(*gdev->mem_balloon.get_req));
 887}
 888
 889/**
 890 * Creates a VBoxGuest user session.
 891 *
 892 * vboxguest_linux.c calls this when userspace opens the char-device.
 893 * Return: A pointer to the new session or an ERR_PTR on error.
 894 * @gdev:               The Guest extension device.
 895 * @user:               Set if this is a session for the vboxuser device.
 896 */
 897struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
 898{
 899        struct vbg_session *session;
 900
 901        session = kzalloc(sizeof(*session), GFP_KERNEL);
 902        if (!session)
 903                return ERR_PTR(-ENOMEM);
 904
 905        session->gdev = gdev;
 906        session->user_session = user;
 907
 908        return session;
 909}
 910
 911/**
 912 * Closes a VBoxGuest session.
 913 * @session:            The session to close (and free).
 914 */
 915void vbg_core_close_session(struct vbg_session *session)
 916{
 917        struct vbg_dev *gdev = session->gdev;
 918        int i, rc;
 919
 920        vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
 921        vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
 922
 923        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
 924                if (!session->hgcm_client_ids[i])
 925                        continue;
 926
 927                vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
 928        }
 929
 930        kfree(session);
 931}
 932
 933static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
 934                         size_t out_size)
 935{
 936        if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
 937            hdr->size_out != (sizeof(*hdr) + out_size))
 938                return -EINVAL;
 939
 940        return 0;
 941}
 942
 943static int vbg_ioctl_driver_version_info(
 944        struct vbg_ioctl_driver_version_info *info)
 945{
 946        const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
 947        u16 min_maj_version, req_maj_version;
 948
 949        if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
 950                return -EINVAL;
 951
 952        req_maj_version = info->u.in.req_version >> 16;
 953        min_maj_version = info->u.in.min_version >> 16;
 954
 955        if (info->u.in.min_version > info->u.in.req_version ||
 956            min_maj_version != req_maj_version)
 957                return -EINVAL;
 958
 959        if (info->u.in.min_version <= VBG_IOC_VERSION &&
 960            min_maj_version == vbg_maj_version) {
 961                info->u.out.session_version = VBG_IOC_VERSION;
 962        } else {
 963                info->u.out.session_version = U32_MAX;
 964                info->hdr.rc = VERR_VERSION_MISMATCH;
 965        }
 966
 967        info->u.out.driver_version  = VBG_IOC_VERSION;
 968        info->u.out.driver_revision = 0;
 969        info->u.out.reserved1      = 0;
 970        info->u.out.reserved2      = 0;
 971
 972        return 0;
 973}
 974
 975static bool vbg_wait_event_cond(struct vbg_dev *gdev,
 976                                struct vbg_session *session,
 977                                u32 event_mask)
 978{
 979        unsigned long flags;
 980        bool wakeup;
 981        u32 events;
 982
 983        spin_lock_irqsave(&gdev->event_spinlock, flags);
 984
 985        events = gdev->pending_events & event_mask;
 986        wakeup = events || session->cancel_waiters;
 987
 988        spin_unlock_irqrestore(&gdev->event_spinlock, flags);
 989
 990        return wakeup;
 991}
 992
 993/* Must be called with the event_spinlock held */
 994static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
 995                                     struct vbg_session *session,
 996                                     u32 event_mask)
 997{
 998        u32 events = gdev->pending_events & event_mask;
 999
1000        gdev->pending_events &= ~events;
1001        return events;
1002}
1003
1004static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1005                                     struct vbg_session *session,
1006                                     struct vbg_ioctl_wait_for_events *wait)
1007{
1008        u32 timeout_ms = wait->u.in.timeout_ms;
1009        u32 event_mask = wait->u.in.events;
1010        unsigned long flags;
1011        long timeout;
1012        int ret = 0;
1013
1014        if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1015                return -EINVAL;
1016
1017        if (timeout_ms == U32_MAX)
1018                timeout = MAX_SCHEDULE_TIMEOUT;
1019        else
1020                timeout = msecs_to_jiffies(timeout_ms);
1021
1022        wait->u.out.events = 0;
1023        do {
1024                timeout = wait_event_interruptible_timeout(
1025                                gdev->event_wq,
1026                                vbg_wait_event_cond(gdev, session, event_mask),
1027                                timeout);
1028
1029                spin_lock_irqsave(&gdev->event_spinlock, flags);
1030
1031                if (timeout < 0 || session->cancel_waiters) {
1032                        ret = -EINTR;
1033                } else if (timeout == 0) {
1034                        ret = -ETIMEDOUT;
1035                } else {
1036                        wait->u.out.events =
1037                           vbg_consume_events_locked(gdev, session, event_mask);
1038                }
1039
1040                spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1041
1042                /*
1043                 * Someone else may have consumed the event(s) first, in
1044                 * which case we go back to waiting.
1045                 */
1046        } while (ret == 0 && wait->u.out.events == 0);
1047
1048        return ret;
1049}
1050
1051static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1052                                               struct vbg_session *session,
1053                                               struct vbg_ioctl_hdr *hdr)
1054{
1055        unsigned long flags;
1056
1057        if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1058                return -EINVAL;
1059
1060        spin_lock_irqsave(&gdev->event_spinlock, flags);
1061        session->cancel_waiters = true;
1062        spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1063
1064        wake_up(&gdev->event_wq);
1065
1066        return 0;
1067}
1068
1069/**
1070 * Checks if the VMM request is allowed in the context of the given session.
1071 * Return: 0 or negative errno value.
1072 * @gdev:               The Guest extension device.
1073 * @session:            The calling session.
1074 * @req:                The request.
1075 */
1076static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1077                           const struct vmmdev_request_header *req)
1078{
1079        const struct vmmdev_guest_status *guest_status;
1080        bool trusted_apps_only;
1081
1082        switch (req->request_type) {
1083        /* Trusted apps only. */
1084        case VMMDEVREQ_QUERY_CREDENTIALS:
1085        case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1086        case VMMDEVREQ_REGISTER_SHARED_MODULE:
1087        case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1088        case VMMDEVREQ_WRITE_COREDUMP:
1089        case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1090        case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1091        case VMMDEVREQ_CHECK_SHARED_MODULES:
1092        case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1093        case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1094        case VMMDEVREQ_REPORT_GUEST_STATS:
1095        case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1096        case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1097                trusted_apps_only = true;
1098                break;
1099
1100        /* Anyone. */
1101        case VMMDEVREQ_GET_MOUSE_STATUS:
1102        case VMMDEVREQ_SET_MOUSE_STATUS:
1103        case VMMDEVREQ_SET_POINTER_SHAPE:
1104        case VMMDEVREQ_GET_HOST_VERSION:
1105        case VMMDEVREQ_IDLE:
1106        case VMMDEVREQ_GET_HOST_TIME:
1107        case VMMDEVREQ_SET_POWER_STATUS:
1108        case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1109        case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1110        case VMMDEVREQ_REPORT_GUEST_STATUS:
1111        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1112        case VMMDEVREQ_VIDEMODE_SUPPORTED:
1113        case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1114        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1115        case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1116        case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1117        case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1118        case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1119        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1120        case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1121        case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1122        case VMMDEVREQ_LOG_STRING:
1123        case VMMDEVREQ_GET_SESSION_ID:
1124                trusted_apps_only = false;
1125                break;
1126
1127        /* Depends on the request parameters... */
1128        case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1129                guest_status = (const struct vmmdev_guest_status *)req;
1130                switch (guest_status->facility) {
1131                case VBOXGUEST_FACILITY_TYPE_ALL:
1132                case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1133                        vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1134                                guest_status->facility);
1135                        return -EPERM;
1136                case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1137                        trusted_apps_only = true;
1138                        break;
1139                case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1140                case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1141                case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1142                default:
1143                        trusted_apps_only = false;
1144                        break;
1145                }
1146                break;
1147
1148        /* Anything else is not allowed. */
1149        default:
1150                vbg_err("Denying userspace vmm call type %#08x\n",
1151                        req->request_type);
1152                return -EPERM;
1153        }
1154
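            /*
             * Requests reserved for trusted applications are refused on
             * sessions opened through the vboxuser device node.
             */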
1155        if (trusted_apps_only && session->user_session) {
1156                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1157                        req->request_type);
1158                return -EPERM;
1159        }
1160
1161        return 0;
1162}
1163
1164static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1165                                struct vbg_session *session, void *data)
1166{
1167        struct vbg_ioctl_hdr *hdr = data;
1168        int ret;
1169
1170        if (hdr->size_in != hdr->size_out)
1171                return -EINVAL;
1172
1173        if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1174                return -E2BIG;
1175
1176        if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1177                return -EINVAL;
1178
1179        ret = vbg_req_allowed(gdev, session, data);
1180        if (ret < 0)
1181                return ret;
1182
1183        vbg_req_perform(gdev, data);
1184        WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1185
1186        return 0;
1187}
1188
1189static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1190                                  struct vbg_session *session,
1191                                  struct vbg_ioctl_hgcm_connect *conn)
1192{
1193        u32 client_id;
1194        int i, ret;
1195
1196        if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1197                return -EINVAL;
1198
1199        /* Find a free place in the sessions clients array and claim it */
1200        mutex_lock(&gdev->session_mutex);
1201        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1202                if (!session->hgcm_client_ids[i]) {
1203                        session->hgcm_client_ids[i] = U32_MAX;
1204                        break;
1205                }
1206        }
1207        mutex_unlock(&gdev->session_mutex);
1208
1209        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1210                return -EMFILE;
1211
1212        ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
1213                               &conn->hdr.rc);
1214
1215        mutex_lock(&gdev->session_mutex);
1216        if (ret == 0 && conn->hdr.rc >= 0) {
1217                conn->u.out.client_id = client_id;
1218                session->hgcm_client_ids[i] = client_id;
1219        } else {
1220                conn->u.out.client_id = 0;
1221                session->hgcm_client_ids[i] = 0;
1222        }
1223        mutex_unlock(&gdev->session_mutex);
1224
1225        return ret;
1226}
1227
1228static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1229                                     struct vbg_session *session,
1230                                     struct vbg_ioctl_hgcm_disconnect *disconn)
1231{
1232        u32 client_id;
1233        int i, ret;
1234
1235        if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1236                return -EINVAL;
1237
1238        client_id = disconn->u.in.client_id;
1239        if (client_id == 0 || client_id == U32_MAX)
1240                return -EINVAL;
1241
1242        mutex_lock(&gdev->session_mutex);
1243        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1244                if (session->hgcm_client_ids[i] == client_id) {
1245                        session->hgcm_client_ids[i] = U32_MAX;
1246                        break;
1247                }
1248        }
1249        mutex_unlock(&gdev->session_mutex);
1250
1251        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1252                return -EINVAL;
1253
1254        ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
1255
1256        mutex_lock(&gdev->session_mutex);
1257        if (ret == 0 && disconn->hdr.rc >= 0)
1258                session->hgcm_client_ids[i] = 0;
1259        else
1260                session->hgcm_client_ids[i] = client_id;
1261        mutex_unlock(&gdev->session_mutex);
1262
1263        return ret;
1264}
1265
1266static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1267                               struct vbg_session *session, bool f32bit,
1268                               struct vbg_ioctl_hgcm_call *call)
1269{
1270        size_t actual_size;
1271        u32 client_id;
1272        int i, ret;
1273
1274        if (call->hdr.size_in < sizeof(*call))
1275                return -EINVAL;
1276
1277        if (call->hdr.size_in != call->hdr.size_out)
1278                return -EINVAL;
1279
1280        if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1281                return -E2BIG;
1282
1283        client_id = call->client_id;
1284        if (client_id == 0 || client_id == U32_MAX)
1285                return -EINVAL;
1286
1287        actual_size = sizeof(*call);
1288        if (f32bit)
1289                actual_size += call->parm_count *
1290                               sizeof(struct vmmdev_hgcm_function_parameter32);
1291        else
1292                actual_size += call->parm_count *
1293                               sizeof(struct vmmdev_hgcm_function_parameter);
1294        if (call->hdr.size_in < actual_size) {
1295                vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1296                          call->hdr.size_in, actual_size);
1297                return -EINVAL;
1298        }
1299        call->hdr.size_out = actual_size;
1300
1301        /*
1302         * Validate the client id.
1303         */
1304        mutex_lock(&gdev->session_mutex);
1305        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1306                if (session->hgcm_client_ids[i] == client_id)
1307                        break;
1308        mutex_unlock(&gdev->session_mutex);
1309        if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1310                vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1311                          client_id);
1312                return -EINVAL;
1313        }
1314
1315        if (f32bit)
1316                ret = vbg_hgcm_call32(gdev, client_id,
1317                                      call->function, call->timeout_ms,
1318                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
1319                                      call->parm_count, &call->hdr.rc);
1320        else
1321                ret = vbg_hgcm_call(gdev, client_id,
1322                                    call->function, call->timeout_ms,
1323                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
1324                                    call->parm_count, &call->hdr.rc);
1325
1326        if (ret == -E2BIG) {
1327                /* E2BIG needs to be reported through the hdr.rc field. */
1328                call->hdr.rc = VERR_OUT_OF_RANGE;
1329                ret = 0;
1330        }
1331
1332        if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1333                vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1334
1335        return ret;
1336}
1337
1338static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1339{
1340        if (log->hdr.size_out != sizeof(log->hdr))
1341                return -EINVAL;
1342
1343        vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1344                 log->u.in.msg);
1345
1346        return 0;
1347}
1348
1349static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1350                                        struct vbg_session *session,
1351                                        struct vbg_ioctl_change_filter *filter)
1352{
1353        u32 or_mask, not_mask;
1354
1355        if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1356                return -EINVAL;
1357
1358        or_mask = filter->u.in.or_mask;
1359        not_mask = filter->u.in.not_mask;
1360
1361        if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1362                return -EINVAL;
1363
1364        return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1365                                            false);
1366}
1367
1368static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1369             struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1370{
1371        u32 or_mask, not_mask;
1372        int ret;
1373
1374        if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1375                return -EINVAL;
1376
1377        or_mask = caps->u.in.or_mask;
1378        not_mask = caps->u.in.not_mask;
1379
1380        if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1381                return -EINVAL;
1382
1383        ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1384                                           false);
1385        if (ret)
1386                return ret;
1387
1388        caps->u.out.session_caps = session->guest_caps;
1389        caps->u.out.global_caps = gdev->guest_caps_host;
1390
1391        return 0;
1392}
1393
1394static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1395                                   struct vbg_ioctl_check_balloon *balloon_info)
1396{
1397        if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1398                return -EINVAL;
1399
1400        balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1401        /*
1402         * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1403         * events entirely in the kernel, see vbg_core_isr().
1404         */
1405        balloon_info->u.out.handle_in_r3 = false;
1406
1407        return 0;
1408}
1409
1410static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1411                                     struct vbg_ioctl_write_coredump *dump)
1412{
1413        struct vmmdev_write_core_dump *req;
1414
1415        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1416                return -EINVAL;
1417
1418        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
1419        if (!req)
1420                return -ENOMEM;
1421
1422        req->flags = dump->u.in.flags;
1423        dump->hdr.rc = vbg_req_perform(gdev, req);
1424
1425        vbg_req_free(req, sizeof(*req));
1426        return 0;
1427}
1428
1429/**
1430 * Common IOCtl for user to kernel communication.
1431 * Return: 0 or negative errno value.
1432 * @session:    The client session.
1433 * @req:        The requested function.
1434 * @data:       The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1435 */
1436int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1437{
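            /*
             * Variable-size ioctls encode their payload size in the ioctl
             * number, so mask the size bits out to match them below.
             */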
1438        unsigned int req_no_size = req & ~IOCSIZE_MASK;
1439        struct vbg_dev *gdev = session->gdev;
1440        struct vbg_ioctl_hdr *hdr = data;
1441        bool f32bit = false;
1442
1443        hdr->rc = VINF_SUCCESS;
1444        if (!hdr->size_out)
1445                hdr->size_out = hdr->size_in;
1446
1447        /*
1448         * hdr->version and hdr->size_in / hdr->size_out minimum size are
1449         * already checked by vbg_misc_device_ioctl().
1450         */
1451
1452        /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1453        if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1454            req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
1455                return vbg_ioctl_vmmrequest(gdev, session, data);
1456
1457        if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1458                return -EINVAL;
1459
1460        /* Fixed size requests. */
1461        switch (req) {
1462        case VBG_IOCTL_DRIVER_VERSION_INFO:
1463                return vbg_ioctl_driver_version_info(data);
1464        case VBG_IOCTL_HGCM_CONNECT:
1465                return vbg_ioctl_hgcm_connect(gdev, session, data);
1466        case VBG_IOCTL_HGCM_DISCONNECT:
1467                return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1468        case VBG_IOCTL_WAIT_FOR_EVENTS:
1469                return vbg_ioctl_wait_for_events(gdev, session, data);
1470        case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1471                return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1472        case VBG_IOCTL_CHANGE_FILTER_MASK:
1473                return vbg_ioctl_change_filter_mask(gdev, session, data);
1474        case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1475                return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1476        case VBG_IOCTL_CHECK_BALLOON:
1477                return vbg_ioctl_check_balloon(gdev, data);
1478        case VBG_IOCTL_WRITE_CORE_DUMP:
1479                return vbg_ioctl_write_core_dump(gdev, data);
1480        }
1481
1482        /* Variable sized requests. */
1483        switch (req_no_size) {
1484#ifdef CONFIG_COMPAT
1485        case VBG_IOCTL_HGCM_CALL_32(0):
1486                f32bit = true;
1487                /* Fall through */
1488#endif
1489        case VBG_IOCTL_HGCM_CALL(0):
1490                return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1491        case VBG_IOCTL_LOG(0):
1492                return vbg_ioctl_log(data);
1493        }
1494
1495        vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
1496        return -ENOTTY;
1497}
1498
1499/**
1500 * Report guest supported mouse-features to the host.
1501 *
1502 * Return: 0 or negative errno value.
1503 * @gdev:               The Guest extension device.
1504 * @features:           The set of features to report to the host.
1505 */
1506int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1507{
1508        struct vmmdev_mouse_status *req;
1509        int rc;
1510
1511        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
1512        if (!req)
1513                return -ENOMEM;
1514
1515        req->mouse_features = features;
1516        req->pointer_pos_x = 0;
1517        req->pointer_pos_y = 0;
1518
1519        rc = vbg_req_perform(gdev, req);
1520        if (rc < 0)
1521                vbg_err("%s error, rc: %d\n", __func__, rc);
1522
1523        vbg_req_free(req, sizeof(*req));
1524        return vbg_status_code_to_errno(rc);
1525}
1526
1527/** Core interrupt service routine. */
1528irqreturn_t vbg_core_isr(int irq, void *dev_id)
1529{
1530        struct vbg_dev *gdev = dev_id;
1531        struct vmmdev_events *req = gdev->ack_events_req;
1532        bool mouse_position_changed = false;
1533        unsigned long flags;
1534        u32 events = 0;
1535        int rc;
1536
1537        if (!gdev->mmio->V.V1_04.have_events)
1538                return IRQ_NONE;
1539
1540        /* Get and acknowledge events. */
1541        req->header.rc = VERR_INTERNAL_ERROR;
1542        req->events = 0;
1543        rc = vbg_req_perform(gdev, req);
1544        if (rc < 0) {
1545                vbg_err("Error performing events req, rc: %d\n", rc);
1546                return IRQ_NONE;
1547        }
1548
1549        events = req->events;
1550
1551        if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1552                mouse_position_changed = true;
1553                events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1554        }
1555
1556        if (events & VMMDEV_EVENT_HGCM) {
1557                wake_up(&gdev->hgcm_wq);
1558                events &= ~VMMDEV_EVENT_HGCM;
1559        }
1560
1561        if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1562                schedule_work(&gdev->mem_balloon.work);
1563                events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1564        }
1565
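            /*
             * Remaining events are queued for userspace waiters, see
             * vbg_ioctl_wait_for_events().
             */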
1566        if (events) {
1567                spin_lock_irqsave(&gdev->event_spinlock, flags);
1568                gdev->pending_events |= events;
1569                spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1570
1571                wake_up(&gdev->event_wq);
1572        }
1573
1574        if (mouse_position_changed)
1575                vbg_linux_mouse_event(gdev);
1576
1577        return IRQ_HANDLED;
1578}
1579