linux/arch/x86/kernel/ldt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *      context.ldt_usr_sem
 *        mmap_sem
 *          context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

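/*
 * The CPU caches each segment's descriptor in a hidden register when the
 * selector is loaded. If DS or ES points into the LDT (TI bit set), reload
 * it so the cached copy is refreshed from the just-updated table.
 */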
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
        unsigned short sel;

        /*
         * Make sure that the cached DS and ES descriptors match the updated
         * LDT.
         */
        savesegment(ds, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(ds, sel);

        savesegment(es, sel);
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
                loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
        struct mm_struct *mm = __mm;

        if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
                return;

        load_mm_ldt(mm);

        refresh_ldt_segments();
}

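/*
 * For reference, struct ldt_struct (defined in asm/mmu.h) is roughly:
 *
 *      struct ldt_struct {
 *              struct desc_struct      *entries;
 *              unsigned int            nr_entries;
 *              int                     slot;   // PTI slot index, -1 if unmapped
 *      };
 */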
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;

        if (num_entries > LDT_ENTRIES)
                return NULL;

        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
        if (!new_ldt)
                return NULL;

        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
        alloc_size = num_entries * LDT_ENTRY_SIZE;

        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
         * trailing nonzero bytes in any page that contains LDT descriptors.
         * Keep it simple: zero the whole allocation and never allocate less
         * than PAGE_SIZE.
         */
        if (alloc_size > PAGE_SIZE)
                new_ldt->entries = vzalloc(alloc_size);
        else
                new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

        if (!new_ldt->entries) {
                kfree(new_ldt);
                return NULL;
        }

        /* The new LDT isn't aliased for PTI yet. */
        new_ldt->slot = -1;

        new_ldt->nr_entries = num_entries;
        return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
                            bool had_kernel_mapping,
                            bool had_user_mapping)
{
        if (mm->context.ldt) {
                /*
                 * We already had an LDT.  The top-level entry should already
                 * have been allocated and synchronized with the usermode
                 * tables.
                 */
                WARN_ON(!had_kernel_mapping);
                if (static_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(!had_user_mapping);
        } else {
                /*
                 * This is the first time we're mapping an LDT for this process.
                 * Sync the pgd to the usermode tables.
                 */
                WARN_ON(had_kernel_mapping);
                if (static_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(had_user_mapping);
        }
}

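/*
 * The whole PTI LDT range is covered by a single top-level entry here, so
 * copying one pgd entry into the user half of the page tables is enough,
 * and it only needs to happen the first time an LDT is mapped for this mm.
 */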
static void map_ldt_struct_to_user(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

        if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
                set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
        bool had_kernel = (pgd->pgd != 0);
        bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

        do_sanity_check(mm, had_kernel, had_user);
}

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
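/*
 * The PTI map region provides two LDT slots; ldt_slot_va(slot) returns the
 * fixed virtual address of a slot. write_ldt() alternates between the two
 * so that other CPUs can keep using the old LDT's mapping until
 * install_ldt() switches them to the new one.
 */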
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
        unsigned long va;
        bool is_vmalloc;
        spinlock_t *ptl;
        int i, nr_pages;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return 0;

        /*
         * Any given ldt_struct should have map_ldt_struct() called at most
         * once.
         */
        WARN_ON(ldt->slot != -1);

        /* Check if the current mappings are sane */
        sanity_check_ldt_mapping(mm);

        is_vmalloc = is_vmalloc_addr(ldt->entries);

        nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

        for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
                pgprot_t pte_prot;
                pte_t pte, *ptep;

                va = (unsigned long)ldt_slot_va(slot) + offset;
                pfn = is_vmalloc ? vmalloc_to_pfn(src) :
                        page_to_pfn(virt_to_page(src));
                /*
                 * Treat the PTI LDT range as a *userspace* range.
                 * get_locked_pte() will allocate all needed pagetables
                 * and account for them in this mm.
                 */
                ptep = get_locked_pte(mm, va, &ptl);
                if (!ptep)
                        return -ENOMEM;
                /*
                 * Map it RO so the easy-to-find address is not a primary
                 * target via some kernel interface which misses a
                 * permission check.
                 */
                pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
                /* Filter out unsupported __PAGE_KERNEL* bits: */
                pgprot_val(pte_prot) &= __supported_pte_mask;
                pte = pfn_pte(pfn, pte_prot);
                set_pte_at(mm, va, ptep, pte);
                pte_unmap_unlock(ptep, ptl);
        }

        /* Propagate LDT mapping to the user page-table */
        map_ldt_struct_to_user(mm);

        ldt->slot = slot;
        return 0;
}

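/*
 * Undo map_ldt_struct(): clear the slot's PTEs and flush the TLB range.
 * The page-table pages themselves stay allocated until free_ldt_pgtables()
 * runs at exit_mmap() time.
 */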
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
        unsigned long va;
        int i, nr_pages;

        if (!ldt)
                return;

        /* LDT map/unmap is only required for PTI */
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

        for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                spinlock_t *ptl;
                pte_t *ptep;

                va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
                ptep = get_locked_pte(mm, va, &ptl);
                pte_clear(mm, va, ptep);
                pte_unmap_unlock(ptep, ptl);
        }

        va = (unsigned long)ldt_slot_va(ldt->slot);
        flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
        return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

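/*
 * Tear down the page-table pages that map_ldt_struct() allocated for the
 * PTI LDT slots. Called from ldt_arch_exit_mmap() once the mm is done.
 */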
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        struct mmu_gather tlb;
        unsigned long start = LDT_BASE_ADDR;
        unsigned long end = LDT_END_ADDR;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        tlb_gather_mmu(&tlb, mm, start, end);
        free_pgd_range(&tlb, start, end, start, end);
        tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
        mutex_lock(&mm->context.lock);

        /* Synchronizes with READ_ONCE in load_mm_ldt. */
        smp_store_release(&mm->context.ldt, ldt);

        /* Activate the LDT for all CPUs using current's mm. */
        on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

        mutex_unlock(&mm->context.lock);
}

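/*
 * vfree_atomic() defers the actual vfree() to a worker, so freeing is safe
 * even from a context that must not sleep.
 */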
static void free_ldt_struct(struct ldt_struct *ldt)
{
        if (likely(!ldt))
                return;

        paravirt_free_ldt(ldt->entries, ldt->nr_entries);
        if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
        kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
        struct ldt_struct *new_ldt;
        int retval = 0;

        if (!old_mm)
                return 0;

        mutex_lock(&old_mm->context.lock);
        if (!old_mm->context.ldt)
                goto out_unlock;

        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }

        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
               new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);

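        /* A fresh mm has no LDT mapped yet, so the copy always takes slot 0. */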
        retval = map_ldt_struct(mm, new_ldt, 0);
        if (retval) {
                free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }
        mm->context.ldt = new_ldt;

out_unlock:
        mutex_unlock(&old_mm->context.lock);
        return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
        free_ldt_struct(mm->context.ldt);
        mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
        free_ldt_pgtables(mm);
}

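/*
 * modify_ldt(func 0): copy the installed LDT out to userspace. Reads past
 * the end of the table are zero-filled, and the return value is the number
 * of bytes "read".
 */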
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        struct mm_struct *mm = current->mm;
        unsigned long entries_size;
        int retval;

        down_read(&mm->context.ldt_usr_sem);

        if (!mm->context.ldt) {
                retval = 0;
                goto out_unlock;
        }

        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
        if (entries_size > bytecount)
                entries_size = bytecount;

        if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }

        if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
                if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
        }
        retval = bytecount;

out_unlock:
        up_read(&mm->context.ldt_usr_sem);
        return retval;
}

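/*
 * modify_ldt(func 2): the "default" LDT is empty, so this just zero-fills
 * the user buffer up to a historical size cap.
 */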
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif
        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}

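/*
 * modify_ldt(func 1 and 0x11): install one descriptor. The legacy entry
 * point (oldmode, func 1) additionally treats base==0/limit==0 as "clear
 * the entry" and forces the AVL bit to zero.
 */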
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
        unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
            LDT_empty(&ldt_info)) {
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
                if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                        error = -EINVAL;
                        goto out;
                }

                fill_ldt(&ldt, &ldt_info);
                if (oldmode)
                        ldt.avl = 0;
        }

        if (down_write_killable(&mm->context.ldt_usr_sem))
                return -EINTR;

        old_ldt       = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
        new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

        error = -ENOMEM;
        new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;

        if (old_ldt)
                memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);

        /*
         * If we are using PTI, map the new LDT into the userspace pagetables.
         * If there is already an LDT, use the other slot so that other CPUs
         * will continue to use the old LDT until install_ldt() switches
         * them over to the new LDT.
         */
        error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
        if (error) {
                /*
                 * This can only fail for the first LDT setup. If an LDT is
                 * already installed then the PTE page is already
                 * populated. Mop up a half-populated page table.
                 */
                if (!WARN_ON_ONCE(old_ldt))
                        free_ldt_pgtables(mm);
                free_ldt_struct(new_ldt);
                goto out_unlock;
        }

        install_ldt(mm, new_ldt);
        unmap_ldt_struct(mm, old_ldt);
        free_ldt_struct(old_ldt);
        error = 0;

out_unlock:
        up_write(&mm->context.ldt_usr_sem);
out:
        return error;
}

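/*
 * For illustration only (not part of this file): a minimal userspace sketch
 * of installing a 32-bit data segment in LDT entry 0 via func 0x11,
 * assuming <asm/ldt.h>'s struct user_desc:
 *
 *      struct user_desc d = {
 *              .entry_number   = 0,
 *              .base_addr      = 0,
 *              .limit          = 0xfffff,
 *              .seg_32bit      = 1,
 *              .limit_in_pages = 1,
 *              .useable        = 1,
 *      };
 *      syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 */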
SYSCALL_DEFINE3(modify_ldt, int, func, void __user *, ptr,
                unsigned long, bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        /*
         * The SYSCALL_DEFINE() macros give us an 'unsigned long'
         * return type, but the ABI for sys_modify_ldt() expects
         * 'int'.  This cast gives us an int-sized value in %rax
         * for the return code.  The 'unsigned' is necessary so
         * the compiler does not try to sign-extend the negative
         * return codes into the high half of the register when
         * taking the value from int->long.
         */
        return (unsigned int)ret;
}