/* linux/arch/x86/kernel/ldt.c */
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
   8
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/sched.h>
  12#include <linux/string.h>
  13#include <linux/kaiser.h>
  14#include <linux/mm.h>
  15#include <linux/smp.h>
  16#include <linux/vmalloc.h>
  17#include <linux/uaccess.h>
  18
  19#include <asm/ldt.h>
  20#include <asm/desc.h>
  21#include <asm/mmu_context.h>
  22#include <asm/syscalls.h>
  23
#ifdef CONFIG_SMP
/*
 * IPI callback: reload the LDT on this CPU if it is currently running
 * on the mm whose LDT was just replaced.
 *
 * @current_mm: the mm_struct whose LDT changed, passed as void * because
 *              smp_call_function() hands the argument through untyped.
 *
 * Runs in interrupt context on remote CPUs; CPUs executing a different
 * mm will pick up the new table lazily in switch_mm().
 */
static void flush_ldt(void *current_mm)
{
	/* Only reload if this CPU is actually using the modified mm. */
	if (current->active_mm == current_mm)
		load_LDT(&current->active_mm->context);
}
#endif
  31
  32static void free_ldt(void *ldt, int size)
  33{
  34        if (size * LDT_ENTRY_SIZE > PAGE_SIZE)
  35                vfree(ldt);
  36        else
  37                put_page(virt_to_page(ldt));
  38}
  39
/*
 * Grow the LDT of @pc so it holds at least @mincount entries, rounding
 * the size up to a whole number of pages.  If @reload is non-zero, the
 * new table is made live immediately on every CPU running this mm.
 *
 * Returns 0 on success (including the no-op case where the table is
 * already big enough) or -ENOMEM on allocation/mapping failure.
 *
 * NOTE(review): callers are expected to serialize via mm->context.lock;
 * write_ldt() does, init_new_context_ldt() relies on the mm being fresh.
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;
	int ret;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	/* Round mincount up to a full page worth of LDT entries. */
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	/* One page or less comes from the page allocator, more from vmalloc. */
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;
	/*
	 * KAISER: the LDT must be visible in the user (shadow) page tables,
	 * since the CPU reads descriptors from it on every segment load.
	 */
	ret = kaiser_add_mapping((unsigned long)newldt,
				 mincount * LDT_ENTRY_SIZE,
				 __PAGE_KERNEL | _PAGE_GLOBAL);
	if (ret) {
		free_ldt(newldt, mincount);
		return -ENOMEM;
	}

	/* Carry over the old entries; zero the newly added tail. */
	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);

	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
	/*
	 * Publish pointer before size so a racing reader never sees a size
	 * larger than the table it dereferences.
	 */
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		/* If other CPUs run this mm, make them reload the LDT too. */
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	/* Old table can only be torn down after the new one is live. */
	if (oldsize) {
		kaiser_remove_mapping((unsigned long)oldldt,
				      oldsize * LDT_ENTRY_SIZE);
		paravirt_free_ldt(oldldt, oldsize);
		free_ldt(oldldt, oldsize);
	}
	return 0;
}
 103
 104static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 105{
 106        int err = alloc_ldt(new, old->size, 0);
 107        int i;
 108
 109        if (err < 0)
 110                return err;
 111
 112        for (i = 0; i < old->size; i++)
 113                write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
 114        return 0;
 115}
 116
 117/*
 118 * we do not have to muck with descriptors here, that is
 119 * done in switch_mm() as needed.
 120 */
 121int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 122{
 123        struct mm_struct *old_mm;
 124        int retval = 0;
 125
 126        mutex_init(&mm->context.lock);
 127        mm->context.size = 0;
 128        old_mm = current->mm;
 129        if (old_mm && old_mm->context.size > 0) {
 130                mutex_lock(&old_mm->context.lock);
 131                retval = copy_ldt(&mm->context, &old_mm->context);
 132                mutex_unlock(&old_mm->context.lock);
 133        }
 134        return retval;
 135}
 136
 137/*
 138 * No need to lock the MM as we are the last user
 139 *
 140 * 64bit: Don't touch the LDT register - we're already in the next thread.
 141 */
 142void destroy_context_ldt(struct mm_struct *mm)
 143{
 144        if (mm->context.size) {
 145#ifdef CONFIG_X86_32
 146                /* CHECKME: Can this ever happen ? */
 147                if (mm == current->active_mm)
 148                        clear_LDT();
 149#endif
 150                kaiser_remove_mapping((unsigned long)mm->context.ldt,
 151                                      mm->context.size * LDT_ENTRY_SIZE);
 152                paravirt_free_ldt(mm->context.ldt, mm->context.size);
 153                if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
 154                        vfree(mm->context.ldt);
 155                else
 156                        put_page(virt_to_page(mm->context.ldt));
 157                mm->context.size = 0;
 158        }
 159}
 160
 161static int read_ldt(void __user *ptr, unsigned long bytecount)
 162{
 163        int err;
 164        unsigned long size;
 165        struct mm_struct *mm = current->mm;
 166
 167        if (!mm->context.size)
 168                return 0;
 169        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 170                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 171
 172        mutex_lock(&mm->context.lock);
 173        size = mm->context.size * LDT_ENTRY_SIZE;
 174        if (size > bytecount)
 175                size = bytecount;
 176
 177        err = 0;
 178        if (copy_to_user(ptr, mm->context.ldt, size))
 179                err = -EFAULT;
 180        mutex_unlock(&mm->context.lock);
 181        if (err < 0)
 182                goto error_return;
 183        if (size != bytecount) {
 184                /* zero-fill the rest */
 185                if (clear_user(ptr + size, bytecount - size) != 0) {
 186                        err = -EFAULT;
 187                        goto error_return;
 188                }
 189        }
 190        return bytecount;
 191error_return:
 192        return err;
 193}
 194
 195static int read_default_ldt(void __user *ptr, unsigned long bytecount)
 196{
 197        /* CHECKME: Can we use _one_ random number ? */
 198#ifdef CONFIG_X86_32
 199        unsigned long size = 5 * sizeof(struct desc_struct);
 200#else
 201        unsigned long size = 128;
 202#endif
 203        if (bytecount > size)
 204                bytecount = size;
 205        if (clear_user(ptr, bytecount))
 206                return -EFAULT;
 207        return bytecount;
 208}
 209
/*
 * modify_ldt(WRITELDT): install or clear one LDT descriptor.
 *
 * @ptr:      user pointer to a struct user_desc describing the entry
 * @bytecount: must be exactly sizeof(struct user_desc)
 * @oldmode:  non-zero for the legacy func==1 ABI, which forbids
 *            contents==3 entries and forces the AVL bit clear
 *
 * Grows the LDT (reloading it on all CPUs) if the entry number is past
 * the current table size.  Returns 0 on success, -EINVAL on a malformed
 * request, -EFAULT on a bad user pointer, or -ENOMEM if growing fails.
 */
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct desc_struct ldt;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	/* contents==3 (call gates) are rejected in oldmode and must be
	 * marked not-present otherwise. */
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&mm->context.lock);
	/* Grow (and immediately reload) the table if the slot is beyond it. */
	if (ldt_info.entry_number >= mm->context.size) {
		error = alloc_ldt(&current->mm->context,
				  ldt_info.entry_number + 1, 1);
		if (error < 0)
			goto out_unlock;
	}

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			/* A zeroed descriptor marks the entry unused. */
			memset(&ldt, 0, sizeof(ldt));
			goto install;
		}
	}

	fill_ldt(&ldt, &ldt_info);
	if (oldmode)
		ldt.avl = 0;	/* legacy ABI has no AVL bit */

	/* Install the new entry ...  */
install:
	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}
 264
 265asmlinkage int sys_modify_ldt(int func, void __user *ptr,
 266                              unsigned long bytecount)
 267{
 268        int ret = -ENOSYS;
 269
 270        switch (func) {
 271        case 0:
 272                ret = read_ldt(ptr, bytecount);
 273                break;
 274        case 1:
 275                ret = write_ldt(ptr, bytecount, 1);
 276                break;
 277        case 2:
 278                ret = read_default_ldt(ptr, bytecount);
 279                break;
 280        case 0x11:
 281                ret = write_ldt(ptr, bytecount, 0);
 282                break;
 283        }
 284        return ret;
 285}
 286