linux/arch/x86/kernel/cpu/sgx/ioctl.c
// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <asm/mman.h>
#include <asm/sgx.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/hashtable.h>
#include <linux/highmem.h>
#include <linux/ratelimit.h>
#include <linux/sched/signal.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
{
        struct sgx_va_page *va_page = NULL;
        void *err;

        BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
                (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);

        if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
                va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
                if (!va_page)
                        return ERR_PTR(-ENOMEM);

                va_page->epc_page = sgx_alloc_va_page();
                if (IS_ERR(va_page->epc_page)) {
                        err = ERR_CAST(va_page->epc_page);
                        kfree(va_page);
                        return err;
                }

                WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
        }
        encl->page_cnt++;
        return va_page;
}
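
/*
 * For reference (values as in mainline, not defined in this file): a
 * Version Array (VA) page is a 4 KiB EPC page holding 8-byte version
 * slots, so SGX_VA_SLOT_COUNT is PAGE_SIZE / 8 = 512. The BUILD_BUG_ON()
 * above encodes that relationship: SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3 is
 * the highest slot index (511), so adding one yields the slot count.
 * One VA page is thus allocated for every 512 enclave pages counted in
 * encl->page_cnt.
 */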

static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
{
        encl->page_cnt--;

        if (va_page) {
                sgx_encl_free_epc_page(va_page->epc_page);
                list_del(&va_page->list);
                kfree(va_page);
        }
}

static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
        struct sgx_epc_page *secs_epc;
        struct sgx_va_page *va_page;
        struct sgx_pageinfo pginfo;
        struct sgx_secinfo secinfo;
        unsigned long encl_size;
        struct file *backing;
        long ret;

        va_page = sgx_encl_grow(encl);
        if (IS_ERR(va_page))
                return PTR_ERR(va_page);
        else if (va_page)
                list_add(&va_page->list, &encl->va_pages);
        /* else the tail page of the VA page list had free slots. */

        /* The extra page goes to SECS. */
        encl_size = secs->size + PAGE_SIZE;

        backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
                                   VM_NORESERVE);
        if (IS_ERR(backing)) {
                ret = PTR_ERR(backing);
                goto err_out_shrink;
        }

        encl->backing = backing;

        secs_epc = sgx_alloc_epc_page(&encl->secs, true);
        if (IS_ERR(secs_epc)) {
                ret = PTR_ERR(secs_epc);
                goto err_out_backing;
        }

        encl->secs.epc_page = secs_epc;

        pginfo.addr = 0;
        pginfo.contents = (unsigned long)secs;
        pginfo.metadata = (unsigned long)&secinfo;
        pginfo.secs = 0;
        memset(&secinfo, 0, sizeof(secinfo));

        ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc));
        if (ret) {
                ret = -EIO;
                goto err_out;
        }

        if (secs->attributes & SGX_ATTR_DEBUG)
                set_bit(SGX_ENCL_DEBUG, &encl->flags);

        encl->secs.encl = encl;
        encl->base = secs->base;
        encl->size = secs->size;
        encl->attributes = secs->attributes;
        encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;

        /* Set only after completion, as encl->lock has not been taken. */
        set_bit(SGX_ENCL_CREATED, &encl->flags);

        return 0;

err_out:
        sgx_encl_free_epc_page(encl->secs.epc_page);
        encl->secs.epc_page = NULL;

err_out_backing:
        fput(encl->backing);
        encl->backing = NULL;

err_out_shrink:
        sgx_encl_shrink(encl, va_page);

        return ret;
}

/**
 * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE
 * @encl:       An enclave pointer.
 * @arg:        The ioctl argument.
 *
 * Allocate kernel data structures for the enclave and invoke ECREATE.
 *
 * Return:
 * - 0:         Success.
 * - -EIO:      ECREATE failed.
 * - -errno:    POSIX error.
 */
static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_create create_arg;
        void *secs;
        int ret;

        if (test_bit(SGX_ENCL_CREATED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
                return -EFAULT;

        secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!secs)
                return -ENOMEM;

        if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
                ret = -EFAULT;
        else
                ret = sgx_encl_create(encl, secs);

        kfree(secs);
        return ret;
}
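
/*
 * Illustrative userspace usage, a minimal sketch against the uapi in
 * <asm/sgx.h>. 'secs_buf' is assumed to be a caller-prepared 4 KiB SECS
 * (layout per the SDM), with a power-of-two size and naturally aligned
 * base:
 *
 *	int fd = open("/dev/sgx_enclave", O_RDWR);
 *	struct sgx_enclave_create create = {
 *		.src = (unsigned long)secs_buf,
 *	};
 *
 *	if (fd < 0 || ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create))
 *		err(1, "SGX_IOC_ENCLAVE_CREATE");
 */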

static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
                                                 unsigned long offset,
                                                 u64 secinfo_flags)
{
        struct sgx_encl_page *encl_page;
        unsigned long prot;

        encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
        if (!encl_page)
                return ERR_PTR(-ENOMEM);

        encl_page->desc = encl->base + offset;
        encl_page->encl = encl;

        prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
               _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
               _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);

        /*
         * TCS pages must always have RW set for CPU access, while their
         * SECINFO permissions are *always* zero - the CPU ignores the user
         * provided values and silently overwrites them with zero permissions.
         */
        if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
                prot |= PROT_READ | PROT_WRITE;

        /* Calculate maximum of the VM flags for the page. */
        encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

        return encl_page;
}

static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
        u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
        u64 pt   = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;

        if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
                return -EINVAL;

        if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
                return -EINVAL;

        /*
         * The CPU silently overwrites TCS permissions with zero, so we need
         * to validate them ourselves.
         */
        if (pt == SGX_SECINFO_TCS && perm)
                return -EINVAL;

        if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
                return -EINVAL;

        if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
                return -EINVAL;

        return 0;
}
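
/*
 * Examples of the rules above, with flag names from <asm/sgx.h>:
 *
 *	SGX_SECINFO_REG | SGX_SECINFO_R | SGX_SECINFO_W	accepted
 *	SGX_SECINFO_REG | SGX_SECINFO_W			rejected (W requires R)
 *	SGX_SECINFO_TCS					accepted (zero perms)
 *	SGX_SECINFO_TCS | SGX_SECINFO_R			rejected (TCS perms
 *							must be zero)
 */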

static int __sgx_encl_add_page(struct sgx_encl *encl,
                               struct sgx_encl_page *encl_page,
                               struct sgx_epc_page *epc_page,
                               struct sgx_secinfo *secinfo, unsigned long src)
{
        struct sgx_pageinfo pginfo;
        struct vm_area_struct *vma;
        struct page *src_page;
        int ret;

        /* Deny noexec. */
        vma = find_vma(current->mm, src);
        if (!vma)
                return -EFAULT;

        if (!(vma->vm_flags & VM_MAYEXEC))
                return -EACCES;

        ret = get_user_pages(src, 1, 0, &src_page, NULL);
        if (ret < 1)
                return -EFAULT;

        pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
        pginfo.addr = encl_page->desc & PAGE_MASK;
        pginfo.metadata = (unsigned long)secinfo;
        pginfo.contents = (unsigned long)kmap_atomic(src_page);

        ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));

        kunmap_atomic((void *)pginfo.contents);
        put_page(src_page);

        return ret ? -EIO : 0;
}

/*
 * If the caller requires measurement of the page as a proof of its contents,
 * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
 * operation until the entire page is measured.
 */
static int __sgx_encl_extend(struct sgx_encl *encl,
                             struct sgx_epc_page *epc_page)
{
        unsigned long offset;
        int ret;

        for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
                ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page),
                                sgx_get_epc_virt_addr(epc_page) + offset);
                if (ret) {
                        if (encls_failed(ret))
                                ENCLS_WARN(ret, "EEXTEND");

                        return -EIO;
                }
        }

        return 0;
}
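
/*
 * With SGX_EEXTEND_BLOCK_SIZE of 256 bytes (the architectural EEXTEND
 * granularity), measuring one 4 KiB page costs PAGE_SIZE / 256 = 16
 * EEXTEND invocations, each of which folds one block and its offset
 * into MRENCLAVE.
 */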

static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
                             unsigned long offset, struct sgx_secinfo *secinfo,
                             unsigned long flags)
{
        struct sgx_encl_page *encl_page;
        struct sgx_epc_page *epc_page;
        struct sgx_va_page *va_page;
        int ret;

        encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
        if (IS_ERR(encl_page))
                return PTR_ERR(encl_page);

        epc_page = sgx_alloc_epc_page(encl_page, true);
        if (IS_ERR(epc_page)) {
                kfree(encl_page);
                return PTR_ERR(epc_page);
        }

        va_page = sgx_encl_grow(encl);
        if (IS_ERR(va_page)) {
                ret = PTR_ERR(va_page);
                goto err_out_free;
        }

        mmap_read_lock(current->mm);
        mutex_lock(&encl->lock);

        /*
         * Adding to encl->va_pages must be done under encl->lock.  Ditto for
         * deleting (via sgx_encl_shrink()) in the error path.
         */
        if (va_page)
                list_add(&va_page->list, &encl->va_pages);

        /*
         * Insert prior to EADD in case of OOM.  EADD modifies MRENCLAVE, i.e.
         * can't be gracefully unwound, while failure on EADD/EXTEND is limited
         * to userspace errors (or kernel/hardware bugs).
         */
        ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
                        encl_page, GFP_KERNEL);
        if (ret)
                goto err_out_unlock;

        ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
                                  src);
        if (ret)
                goto err_out;

        /*
         * Complete the "add" before doing the "extend" so that the "add"
         * isn't in a half-baked state in the extremely unlikely scenario
         * the enclave will be destroyed in response to EEXTEND failure.
         */
        encl_page->encl = encl;
        encl_page->epc_page = epc_page;
        encl->secs_child_cnt++;

        if (flags & SGX_PAGE_MEASURE) {
                ret = __sgx_encl_extend(encl, epc_page);
                if (ret)
                        goto err_out;
        }

        sgx_mark_page_reclaimable(encl_page->epc_page);
        mutex_unlock(&encl->lock);
        mmap_read_unlock(current->mm);
        return ret;

err_out:
        xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_unlock:
        sgx_encl_shrink(encl, va_page);
        mutex_unlock(&encl->lock);
        mmap_read_unlock(current->mm);

err_out_free:
        sgx_encl_free_epc_page(epc_page);
        kfree(encl_page);

        return ret;
}

/**
 * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
 * @encl:       an enclave pointer
 * @arg:        a user pointer to a struct sgx_enclave_add_pages instance
 *
 * Add one or more pages to an uninitialized enclave, and optionally extend the
 * measurement with the contents of the page. The SECINFO and measurement mask
 * are applied to all pages.
 *
 * A SECINFO for a TCS is required to always contain zero permissions because
 * the CPU silently zeros them. Allowing anything else would cause a mismatch
 * in the measurement.
 *
 * mmap()'s protection bits are capped by the page permissions. For each page
 * address, the maximum protection bits are computed with the following
 * heuristics:
 *
 * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
 * 2. A TCS page: PROT_R | PROT_W.
 *
 * mmap() is not allowed to surpass the minimum of the maximum protection bits
 * within the given address range.
 *
 * The function deinitializes kernel data structures for the enclave and
 * returns -EIO in any of the following conditions:
 *
 * - Enclave Page Cache (EPC), the physical memory holding enclaves, has
 *   been invalidated. This will cause EADD and EEXTEND to fail.
 * - The source address is somehow corrupted when executing EADD.
 *
 * Return:
 * - 0:         Success.
 * - -EACCES:   The source page is located in a noexec partition.
 * - -ENOMEM:   Out of EPC pages.
 * - -EINTR:    The call was interrupted before data was processed.
 * - -EIO:      Either EADD or EEXTEND failed because of an invalid source
 *              address or a power cycle.
 * - -errno:    POSIX error.
 */
static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_add_pages add_arg;
        struct sgx_secinfo secinfo;
        unsigned long c;
        int ret;

        if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
            test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
                return -EFAULT;

        if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) ||
            !IS_ALIGNED(add_arg.src, PAGE_SIZE))
                return -EINVAL;

        if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1))
                return -EINVAL;

        if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size)
                return -EINVAL;

        if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
                           sizeof(secinfo)))
                return -EFAULT;

        if (sgx_validate_secinfo(&secinfo))
                return -EINVAL;

        for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) {
                if (signal_pending(current)) {
                        if (!c)
                                ret = -ERESTARTSYS;

                        break;
                }

                if (need_resched())
                        cond_resched();

                ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c,
                                        &secinfo, add_arg.flags);
                if (ret)
                        break;
        }

        add_arg.count = c;

        if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
                return -EFAULT;

        return ret;
}
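
/*
 * Illustrative userspace usage, a minimal sketch against the uapi in
 * <asm/sgx.h>. 'fd', 'src_buf' and 'nr_pages' are assumed to be provided
 * by the caller, with 'src_buf' page-aligned:
 *
 *	struct sgx_secinfo secinfo = {
 *		.flags = SGX_SECINFO_REG | SGX_SECINFO_R | SGX_SECINFO_W,
 *	};
 *	struct sgx_enclave_add_pages add = {
 *		.src = (unsigned long)src_buf,
 *		.offset = 0,
 *		.length = nr_pages * 4096UL,
 *		.secinfo = (unsigned long)&secinfo,
 *		.flags = SGX_PAGE_MEASURE,
 *	};
 *
 *	if (ioctl(fd, SGX_IOC_ENCLAVE_ADD_PAGES, &add))
 *		err(1, "SGX_IOC_ENCLAVE_ADD_PAGES");
 *
 * On return, 'add.count' holds the number of bytes actually processed.
 */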

static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
                              void *hash)
{
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;

        return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
}

static int sgx_get_key_hash(const void *modulus, void *hash)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = __sgx_get_key_hash(tfm, modulus, hash);

        crypto_free_shash(tfm);
        return ret;
}

static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
                         void *token)
{
        u64 mrsigner[4];
        int i, j;
        void *addr;
        int ret;

        /*
         * Deny initializing enclaves with attributes (namely provisioning)
         * that have not been explicitly allowed.
         */
        if (encl->attributes & ~encl->attributes_mask)
                return -EACCES;

        /*
         * Attributes should not be enforced *only* against what's available
         * on the platform (done in sgx_encl_create) but also checked and
         * enforced against the enforcement mask in the sigstruct. For
         * example, an enclave could opt to sign with the AVX bit in xfrm but
         * still be loadable on a platform without it if
         * sigstruct->body.attributes_mask does not turn that bit on.
         */
        if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
            sgx_attributes_reserved_mask)
                return -EINVAL;

        if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
            sgx_misc_reserved_mask)
                return -EINVAL;

        if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
            sgx_xfrm_reserved_mask)
                return -EINVAL;

        ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
        if (ret)
                return ret;

        mutex_lock(&encl->lock);

        /*
         * ENCLS[EINIT] is interruptible because it has such a high latency,
         * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
         * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
         * serviced.
         */
        for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
                for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
                        addr = sgx_get_epc_virt_addr(encl->secs.epc_page);

                        preempt_disable();

                        sgx_update_lepubkeyhash(mrsigner);

                        ret = __einit(sigstruct, token, addr);

                        preempt_enable();

                        if (ret == SGX_UNMASKED_EVENT)
                                continue;
                        else
                                break;
                }

                if (ret != SGX_UNMASKED_EVENT)
                        break;

                msleep_interruptible(SGX_EINIT_SLEEP_TIME);

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        goto err_out;
                }
        }

        if (encls_faulted(ret)) {
                if (encls_failed(ret))
                        ENCLS_WARN(ret, "EINIT");

                ret = -EIO;
        } else if (ret) {
                pr_debug("EINIT returned %d\n", ret);
                ret = -EPERM;
        } else {
                set_bit(SGX_ENCL_INITIALIZED, &encl->flags);
        }

err_out:
        mutex_unlock(&encl->lock);
        return ret;
}

/**
 * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT
 * @encl:       an enclave pointer
 * @arg:        userspace pointer to a struct sgx_enclave_init instance
 *
 * Flush any outstanding enqueued EADD operations and perform EINIT.  The
 * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
 * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
 *
 * Return:
 * - 0:         Success.
 * - -EPERM:    Invalid SIGSTRUCT.
 * - -EIO:      EINIT failed because of a power cycle.
 * - -errno:    POSIX error.
 */
static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_sigstruct *sigstruct;
        struct sgx_enclave_init init_arg;
        void *token;
        int ret;

        if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
            test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
                return -EFAULT;

        /*
         * 'sigstruct' must be on a page boundary and 'token' on a 512 byte
         * boundary.  kmalloc() will give this alignment when allocating
         * PAGE_SIZE bytes.
         */
        sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!sigstruct)
                return -ENOMEM;

        token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
        memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);

        if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct,
                           sizeof(*sigstruct))) {
                ret = -EFAULT;
                goto out;
        }

        /*
         * A legacy field used with Intel-signed enclaves. The values 0x0000
         * and 0x8086 used to mean regular and architectural enclaves,
         * respectively. The CPU accepts only these values, and they carry no
         * other meaning.
         *
         * Thus, reject any other values.
         */
        if (sigstruct->header.vendor != 0x0000 &&
            sigstruct->header.vendor != 0x8086) {
                ret = -EINVAL;
                goto out;
        }

        ret = sgx_encl_init(encl, sigstruct, token);

out:
        kfree(sigstruct);
        return ret;
}
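
/*
 * Illustrative userspace usage, a minimal sketch against the uapi in
 * <asm/sgx.h>. 'sigstruct_buf' is assumed to hold the 1808-byte SIGSTRUCT
 * produced at signing time:
 *
 *	struct sgx_enclave_init init = {
 *		.sigstruct = (unsigned long)sigstruct_buf,
 *	};
 *
 *	if (ioctl(fd, SGX_IOC_ENCLAVE_INIT, &init))
 *		err(1, "SGX_IOC_ENCLAVE_INIT");
 */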

/**
 * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION
 * @encl:       an enclave pointer
 * @arg:        userspace pointer to a struct sgx_enclave_provision instance
 *
 * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to
 * /dev/sgx_provision.
 *
 * Return:
 * - 0:         Success.
 * - -errno:    Otherwise.
 */
static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_provision params;

        if (copy_from_user(&params, arg, sizeof(params)))
                return -EFAULT;

        return sgx_set_attribute(&encl->attributes_mask, params.fd);
}
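
/*
 * Illustrative userspace usage, a minimal sketch: access to
 * /dev/sgx_provision is what actually gates PROVISION_KEY, so the caller
 * proves it by passing an open file descriptor:
 *
 *	int provision_fd = open("/dev/sgx_provision", O_RDONLY);
 *	struct sgx_enclave_provision params = {
 *		.fd = provision_fd,
 *	};
 *
 *	if (provision_fd < 0 ||
 *	    ioctl(fd, SGX_IOC_ENCLAVE_PROVISION, &params))
 *		err(1, "SGX_IOC_ENCLAVE_PROVISION");
 */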

long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct sgx_encl *encl = filep->private_data;
        int ret;

        if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags))
                return -EBUSY;

        switch (cmd) {
        case SGX_IOC_ENCLAVE_CREATE:
                ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_ADD_PAGES:
                ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_INIT:
                ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_PROVISION:
                ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
                break;
        default:
                ret = -ENOIOCTLCMD;
                break;
        }

        clear_bit(SGX_ENCL_IOCTL, &encl->flags);
        return ret;
}