linux/arch/um/sys-i386/ldt.c
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include "os.h"
#include "proc_mm.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "sysdep/tls.h"

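/*
 * modify_ldt() here is the host-side call; ldt_get_host_info() below uses
 * it to read the LDT that UML inherited from the host process.
 */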
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);

static long write_ldt_entry(struct mm_id *mm_idp, int func,
                     struct user_desc *desc, void **addr, int done)
{
        long res;

        if (proc_mm) {
                /*
                 * Special handling for the case where the mm to modify
                 * isn't current->active_mm.
                 * If this is called directly by modify_ldt,
                 *     (current->active_mm->context.skas.u == mm_idp)
                 * will be true, so no call to __switch_mm(mm_idp) is done.
                 * If this is called from init_new_ldt or PTRACE_LDT, mm_idp
                 * won't belong to current->active_mm, but to child->mm.
                 * In that case we need to switch the child's mm into our
                 * userspace, then switch back later.
                 *
                 * Note: it is unclear whether interrupts should be
                 * disabled here.
                 */
                if (!current->active_mm || current->active_mm == &init_mm ||
                    mm_idp != &current->active_mm->context.id)
                        __switch_mm(mm_idp);
        }

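        /*
         * Two mechanisms are available to modify the child's LDT: if the
         * host supports PTRACE_LDT, ask the host kernel directly;
         * otherwise run modify_ldt inside the child via the syscall stub.
         */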
        if (ptrace_ldt) {
                struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
                        .func = func,
                        .ptr = desc,
                        .bytecount = sizeof(*desc)};
                u32 cpu;
                int pid;

                if (!proc_mm)
                        pid = mm_idp->u.pid;
                else {
                        cpu = get_cpu();
                        pid = userspace_pid[cpu];
                }

                res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

                if (proc_mm)
                        put_cpu();
        }
        else {
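                /*
                 * Stub path: copy the descriptor into the child's stub
                 * data area (rounded up to a whole number of longs), then
                 * run modify_ldt in the child pointing at that copy.
                 */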
                void *stub_addr;
                res = syscall_stub_data(mm_idp, (unsigned long *)desc,
                                        (sizeof(*desc) + sizeof(long) - 1) &
                                            ~(sizeof(long) - 1),
                                        addr, &stub_addr);
                if (!res) {
                        unsigned long args[] = { func,
                                                 (unsigned long)stub_addr,
                                                 sizeof(*desc),
                                                 0, 0, 0 };
                        res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
                                               0, addr, done);
                }
        }

        if (proc_mm) {
                /*
                 * This is the second part of the special handling above,
                 * which makes PTRACE_LDT possible to implement: switch
                 * back to our own mm if we switched away from it.
                 */
                if (current->active_mm && current->active_mm != &init_mm &&
                    mm_idp != &current->active_mm->context.id)
                        __switch_mm(&current->active_mm->context.id);
        }

        return res;
}

static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
        int res, n;
        struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
                        .func = 0,
                        .bytecount = bytecount,
                        .ptr = kmalloc(bytecount, GFP_KERNEL)};
        u32 cpu;

        if (ptrace_ldt.ptr == NULL)
                return -ENOMEM;

        /*
         * This is called from sys_modify_ldt only, so userspace_pid gives
         * us the right number.
         */

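        /*
         * get_cpu() disables preemption, so the userspace_pid slot for
         * this CPU can't change under us until put_cpu().
         */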
        cpu = get_cpu();
        res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
        put_cpu();
        if (res < 0)
                goto out;

        n = copy_to_user(ptr, ptrace_ldt.ptr, res);
        if (n != 0)
                res = -EFAULT;

out:
        kfree(ptrace_ldt.ptr);

        return res;
}

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */

static int read_ldt(void __user * ptr, unsigned long bytecount)
{
        int i, err = 0;
        unsigned long size;
        uml_ldt_t * ldt = &current->mm->context.ldt;

        if (!ldt->entry_count)
                goto out;
        if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
        err = bytecount;

        if (ptrace_ldt)
                return read_ldt_from_host(ptr, bytecount);

        mutex_lock(&ldt->lock);
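        /*
         * Entries are stored directly in ldt->u.entries while they fit;
         * once the LDT grows past LDT_DIRECT_ENTRIES they live in
         * separately allocated pages in ldt->u.pages.
         */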
        if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
                size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
                if (size > bytecount)
                        size = bytecount;
                if (copy_to_user(ptr, ldt->u.entries, size))
                        err = -EFAULT;
                bytecount -= size;
                ptr += size;
        }
        else {
                for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
                     i++) {
                        size = PAGE_SIZE;
                        if (size > bytecount)
                                size = bytecount;
                        if (copy_to_user(ptr, ldt->u.pages[i], size)) {
                                err = -EFAULT;
                                break;
                        }
                        bytecount -= size;
                        ptr += size;
                }
        }
        mutex_unlock(&ldt->lock);

        if (bytecount == 0 || err == -EFAULT)
                goto out;

        if (clear_user(ptr, bytecount))
                err = -EFAULT;

out:
        return err;
}

static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
        int err;

        if (bytecount > 5*LDT_ENTRY_SIZE)
                bytecount = 5*LDT_ENTRY_SIZE;

        err = bytecount;
        /*
         * UML doesn't support lcall7 and lcall27, so we don't really have
         * a default ldt; instead we emulate an empty ldt of the common
         * host default ldt size.
         */
        if (clear_user(ptr, bytecount))
                err = -EFAULT;

        return err;
}

static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
        uml_ldt_t * ldt = &current->mm->context.ldt;
        struct mm_id * mm_idp = &current->mm->context.id;
        int i, err;
        struct user_desc ldt_info;
        struct ldt_entry entry0, *ldt_p;
        void *addr = NULL;

        err = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        err = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        err = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (func == 1)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        if (!ptrace_ldt)
                mutex_lock(&ldt->lock);

        err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
        if (err)
                goto out_unlock;
        else if (ptrace_ldt) {
                /* With PTRACE_LDT available, this is used as a flag only */
                ldt->entry_count = 1;
                goto out;
        }

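        /*
         * Growing past LDT_DIRECT_ENTRIES: switch from the embedded
         * ldt->u.entries array to separately allocated pages. Since
         * u.entries and u.pages share a union, entry 0 is saved in
         * entry0 before u.pages[0] overwrites it, then copied back.
         */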
        if (ldt_info.entry_number >= ldt->entry_count &&
            ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
                for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
                     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
                     i++) {
                        if (i == 0)
                                memcpy(&entry0, ldt->u.entries,
                                       sizeof(entry0));
                        ldt->u.pages[i] = (struct ldt_entry *)
                                __get_free_page(GFP_KERNEL|__GFP_ZERO);
                        if (!ldt->u.pages[i]) {
                                err = -ENOMEM;
                                /* Undo the change in host */
                                memset(&ldt_info, 0, sizeof(ldt_info));
                                write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
                                goto out_unlock;
                        }
                        if (i == 0) {
                                memcpy(ldt->u.pages[0], &entry0,
                                       sizeof(entry0));
                                memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
                                       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
                        }
                        ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
                }
        }
        if (ldt->entry_count <= ldt_info.entry_number)
                ldt->entry_count = ldt_info.entry_number + 1;

        if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
                ldt_p = ldt->u.entries + ldt_info.entry_number;
        else
                ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
                        ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

        if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
           (func == 1 || LDT_empty(&ldt_info))) {
                ldt_p->a = 0;
                ldt_p->b = 0;
        }
        else {
                if (func == 1)
                        ldt_info.useable = 0;
                ldt_p->a = LDT_entry_a(&ldt_info);
                ldt_p->b = LDT_entry_b(&ldt_info);
        }
        err = 0;

out_unlock:
        mutex_unlock(&ldt->lock);
out:
        return err;
}

static long do_modify_ldt_skas(int func, void __user *ptr,
                               unsigned long bytecount)
{
        int ret = -ENOSYS;

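        /*
         * func values match the host's sys_modify_ldt: 0 reads the LDT,
         * 1 and 0x11 write an entry (0x11 being the newer interface that
         * allows the "useable" bit), 2 reads the default LDT.
         */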
        switch (func) {
                case 0:
                        ret = read_ldt(ptr, bytecount);
                        break;
                case 1:
                case 0x11:
                        ret = write_ldt(ptr, bytecount, func);
                        break;
                case 2:
                        ret = read_default_ldt(ptr, bytecount);
                        break;
        }
        return ret;
}

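/*
 * host_ldt_entries lists the indices of the LDT entries inherited from
 * the host, terminated by -1. dummy_list serves as a static buffer for
 * short lists, avoiding an allocation; dummy_list+1 points straight at
 * the -1 terminator, i.e. an empty list.
 */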
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;

static void ldt_get_host_info(void)
{
        long ret;
        struct ldt_entry * ldt;
        short *tmp;
        int i, size, k, order;

        spin_lock(&host_ldt_lock);

        if (host_ldt_entries != NULL) {
                spin_unlock(&host_ldt_lock);
                return;
        }
        host_ldt_entries = dummy_list+1;

        spin_unlock(&host_ldt_lock);

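        /*
         * Compute the allocation order for LDT_PAGES_MAX pages, i.e. the
         * smallest order with (1 << order) >= LDT_PAGES_MAX.
         */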
        for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
                ;

        ldt = (struct ldt_entry *)
              __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (ldt == NULL) {
                printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
                       "for host ldt\n");
                return;
        }

        ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
        if (ret < 0) {
                printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
                goto out_free;
        }
        if (ret == 0) {
                /* default_ldt is active, simply write an empty entry 0 */
                host_ldt_entries = dummy_list;
                goto out_free;
        }

        for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        size++;
        }

        if (size < ARRAY_SIZE(dummy_list))
                host_ldt_entries = dummy_list;
        else {
                size = (size + 1) * sizeof(dummy_list[0]);
                tmp = kmalloc(size, GFP_KERNEL);
                if (tmp == NULL) {
                        printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
                               "host ldt list\n");
                        goto out_free;
                }
                host_ldt_entries = tmp;
        }

        for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        host_ldt_entries[k++] = i;
        }
        host_ldt_entries[k] = -1;

out_free:
        free_pages((unsigned long)ldt, order);
}

long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
        struct user_desc desc;
        short * num_p;
        int i;
        long page, err=0;
        void *addr = NULL;
        struct proc_mm_op copy;

        if (!ptrace_ldt)
                mutex_init(&new_mm->ldt.lock);

        if (!from_mm) {
                memset(&desc, 0, sizeof(desc));
                /*
                 * We have to initialize a clean ldt.
                 */
                if (proc_mm) {
                        /*
                         * If the new mm was created using proc_mm, the
                         * host's default ldt is currently assigned, which
                         * normally contains the call-gates for lcall7 and
                         * lcall27. To remove these gates, we simply write
                         * an empty entry as number 0 to the host.
                         */
                        err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
                }
                else {
                        /*
                         * Now we try to retrieve info about the ldt we
                         * inherited from the host. All ldt entries found
                         * will be reset in the following loop.
                         */
                        ldt_get_host_info();
                        for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
                                desc.entry_number = *num_p;
                                err = write_ldt_entry(&new_mm->id, 1, &desc,
                                                      &addr, *(num_p + 1) == -1);
                                if (err)
                                        break;
                        }
                }
                new_mm->ldt.entry_count = 0;

                goto out;
        }

        if (proc_mm) {
                /*
                 * We have a valid from_mm, so we now have to copy the LDT
                 * of from_mm to new_mm, because using proc_mm a new mm
                 * with an empty/default LDT was created in new_mm().
                 */
                copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
                                              .u        =
                                              { .copy_segments =
                                                        from_mm->id.u.mm_fd } } );
                i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
                if (i != sizeof(copy))
                        printk(KERN_ERR "new_mm : /proc/mm copy_segments "
                               "failed, err = %d\n", -i);
        }

        if (!ptrace_ldt) {
                /*
                 * Our local LDT is used to supply the data for
                 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
                 * i.e., we have to use the stub for modify_ldt, which
                 * can't handle the big read buffer of up to 64kB.
                 */
                mutex_lock(&from_mm->ldt.lock);
                if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
                        memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
                               sizeof(new_mm->ldt.u.entries));
                else {
                        i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                        while (i-->0) {
                                page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
                                if (!page) {
                                        err = -ENOMEM;
                                        break;
                                }
                                new_mm->ldt.u.pages[i] =
                                        (struct ldt_entry *) page;
                                memcpy(new_mm->ldt.u.pages[i],
                                       from_mm->ldt.u.pages[i], PAGE_SIZE);
                        }
                }
                new_mm->ldt.entry_count = from_mm->ldt.entry_count;
                mutex_unlock(&from_mm->ldt.lock);
        }

out:
        return err;
}

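/*
 * Only the paged representation owns separate allocations; direct entries
 * are embedded in the mm_context itself and need no freeing.
 */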
void free_ldt(struct mm_context *mm)
{
        int i;

        if (!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES) {
                i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                while (i-- > 0)
                        free_page((long) mm->ldt.u.pages[i]);
        }
        mm->ldt.entry_count = 0;
}

int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
        return do_modify_ldt_skas(func, ptr, bytecount);
}