linux/arch/ia64/ia32/sys_ia32.c
/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
 *
 * Copyright (C) 2000           VA Linux Co
 * Copyright (C) 2000           Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999           Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998      Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997           David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2004           Gordon Jin <gordon.jin@intel.com>
 *
 * These routines convert 32-bit syscall arguments to their 64-bit
 * equivalents (and back) for the IA-32 emulation environment.
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>
#include <linux/mutex.h>

#include <asm/intrinsics.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ia32priv.h"

#include <net/scm.h>
#include <net/sock.h>

#define DEBUG   0

#if DEBUG
# define DBG(fmt...)    printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif

#define ROUND_UP(x,a)   ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))

#define OFFSET4K(a)             ((a) & 0xfff)
#define PAGE_START(addr)        ((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32        2048

#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))

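/*
 * For example, with 4KB IA32 pages, OFFSET4K(0x12345) is 0x345 (i.e. the
 * address is not 4KB aligned), and high2lowuid(100000) is 65534, the
 * conventional overflow value for 16-bit uids.
 */
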
/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this mutex
 * while doing so.
 */
/* XXX make per-mm: */
static DEFINE_MUTEX(ia32_mmap_mutex);

asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
              struct pt_regs *regs)
{
        long error;
        char *filename;
        unsigned long old_map_base, old_task_size, tssd;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;

        old_map_base  = current->thread.map_base;
        old_task_size = current->thread.task_size;
        tssd = ia64_get_kr(IA64_KR_TSSD);

        /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
        current->thread.map_base  = DEFAULT_MAP_BASE;
        current->thread.task_size = DEFAULT_TASK_SIZE;
        ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
        ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

        error = compat_do_execve(filename, argv, envp, regs);
        putname(filename);

        if (error < 0) {
                /* oops, execve failed, switch back to old values... */
                ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
                ia64_set_kr(IA64_KR_TSSD, tssd);
                current->thread.map_base  = old_map_base;
                current->thread.task_size = old_task_size;
        }

        return error;
}


#if PAGE_SHIFT > IA32_PAGE_SHIFT


static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
        int prot = 0;

        if (!vma || vma->vm_start > addr)
                return 0;

        if (vma->vm_flags & VM_READ)
                prot |= PROT_READ;
        if (vma->vm_flags & VM_WRITE)
                prot |= PROT_WRITE;
        if (vma->vm_flags & VM_EXEC)
                prot |= PROT_EXEC;
        return prot;
}
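
/*
 * For example, a vma covering @addr with VM_READ|VM_WRITE yields
 * PROT_READ|PROT_WRITE, while an unmapped @addr yields 0 (PROT_NONE).
 */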

/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the subpage.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
              loff_t off)
{
        void *page = NULL;
        struct inode *inode;
        unsigned long ret = 0;
        struct vm_area_struct *vma = find_vma(current->mm, start);
        int old_prot = get_page_prot(vma, start);

        DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
            file, start, end, prot, flags, off);


        /* Optimize the case where the old mmap and the new mmap are both anonymous */
        if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
                if (clear_user((void __user *) start, end - start)) {
                        ret = -EFAULT;
                        goto out;
                }
                goto skip_mmap;
        }

        page = (void *) get_zeroed_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        if (old_prot)
                copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);

        down_write(&current->mm->mmap_sem);
        {
                ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
                              flags | MAP_FIXED | MAP_ANONYMOUS, 0);
        }
        up_write(&current->mm->mmap_sem);

        if (IS_ERR((void *) ret))
                goto out;

        if (old_prot) {
                /* copy back the old page contents.  */
                if (offset_in_page(start))
                        copy_to_user((void __user *) PAGE_START(start), page,
                                     offset_in_page(start));
                if (offset_in_page(end))
                        copy_to_user((void __user *) end, page + offset_in_page(end),
                                     PAGE_SIZE - offset_in_page(end));
        }

        if (!(flags & MAP_ANONYMOUS)) {
                /* read the file contents */
                inode = file->f_path.dentry->d_inode;
                if (!inode->i_fop || !file->f_op->read
                    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
                {
                        ret = -EINVAL;
                        goto out;
                }
        }

 skip_mmap:
        if (!(prot & PROT_WRITE))
                ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
  out:
        if (page)
                free_page((unsigned long) page);
        return ret;
}
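
/*
 * Sketch of the IA64 page produced by mmap_subpage(), assuming 16KB
 * kernel pages and a 4KB subpage mapped at @start:
 *
 *      PAGE_START(start)                                  +PAGE_SIZE
 *      |  old contents  |  new subpage  |  old contents  |
 *                     start            end
 *
 * The anonymous page replaces the whole IA64 page; the regions outside
 * [start, end) are copied back from the saved copy of the old page.
 */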

/* SLAB cache for ia64_partial_page structures */
struct kmem_cache *ia64_partial_page_cachep;

/*
 * Initialize an ia64_partial_page_list.
 * Returns NULL if the kmalloc fails.
 */
struct ia64_partial_page_list*
ia32_init_pp_list(void)
{
        struct ia64_partial_page_list *p;

        if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
                return p;
        p->pp_head = NULL;
        p->ppl_rb = RB_ROOT;
        p->pp_hint = NULL;
        atomic_set(&p->pp_count, 1);
        return p;
}

/*
 * Search partial page list @ppl for the partial page containing @start.
 * If it is found, return it.  Otherwise return NULL and set @pprev,
 * @rb_link, and @rb_parent for use by a subsequent __ia32_insert_pp().
 */
static struct ia64_partial_page *
__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
        struct ia64_partial_page **pprev, struct rb_node ***rb_link,
        struct rb_node **rb_parent)
{
        struct ia64_partial_page *pp;
        struct rb_node **__rb_link, *__rb_parent, *rb_prev;

        pp = ppl->pp_hint;
        if (pp && pp->base == start)
                return pp;

        __rb_link = &ppl->ppl_rb.rb_node;
        rb_prev = __rb_parent = NULL;

        while (*__rb_link) {
                __rb_parent = *__rb_link;
                pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);

                if (pp->base == start) {
                        ppl->pp_hint = pp;
                        return pp;
                } else if (pp->base < start) {
                        rb_prev = __rb_parent;
                        __rb_link = &__rb_parent->rb_right;
                } else {
                        __rb_link = &__rb_parent->rb_left;
                }
        }

        *rb_link = __rb_link;
        *rb_parent = __rb_parent;
        *pprev = NULL;
        if (rb_prev)
                *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
        return NULL;
}

/*
 * insert @pp into @ppl.
 */
static void
__ia32_insert_pp(struct ia64_partial_page_list *ppl,
        struct ia64_partial_page *pp, struct ia64_partial_page *prev,
        struct rb_node **rb_link, struct rb_node *rb_parent)
{
        /* link list */
        if (prev) {
                pp->next = prev->next;
                prev->next = pp;
        } else {
                ppl->pp_head = pp;
                if (rb_parent)
                        pp->next = rb_entry(rb_parent,
                                struct ia64_partial_page, pp_rb);
                else
                        pp->next = NULL;
        }

        /* link rb */
        rb_link_node(&pp->pp_rb, rb_parent, rb_link);
        rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);

        ppl->pp_hint = pp;
}

/*
 * delete @pp from partial page list @ppl.
 */
static void
__ia32_delete_pp(struct ia64_partial_page_list *ppl,
        struct ia64_partial_page *pp, struct ia64_partial_page *prev)
{
        if (prev) {
                prev->next = pp->next;
                if (ppl->pp_hint == pp)
                        ppl->pp_hint = prev;
        } else {
                ppl->pp_head = pp->next;
                if (ppl->pp_hint == pp)
                        ppl->pp_hint = pp->next;
        }
        rb_erase(&pp->pp_rb, &ppl->ppl_rb);
        kmem_cache_free(ia64_partial_page_cachep, pp);
}

static struct ia64_partial_page *
__pp_prev(struct ia64_partial_page *pp)
{
        struct rb_node *prev = rb_prev(&pp->pp_rb);
        if (prev)
                return rb_entry(prev, struct ia64_partial_page, pp_rb);
        else
                return NULL;
}

/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
        struct ia64_partial_page *pp, *prev;
        struct rb_node **rb_link, *rb_parent;

        if (start >= end)
                return;

        pp = __ia32_find_pp(current->thread.ppl, start, &prev,
                                        &rb_link, &rb_parent);
        if (pp)
                prev = __pp_prev(pp);
        else {
                if (prev)
                        pp = prev->next;
                else
                        pp = current->thread.ppl->pp_head;
        }

        while (pp && pp->base < end) {
                struct ia64_partial_page *tmp = pp->next;
                __ia32_delete_pp(current->thread.ppl, pp, prev);
                pp = tmp;
        }
}

/*
 * Set the range between @start and @end in the bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
        struct ia64_partial_page *pp, *prev;
        struct rb_node ** rb_link, *rb_parent;
        unsigned int pstart, start_bit, end_bit, i;

        pstart = PAGE_START(start);
        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
        if (end_bit == 0)
                end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
                                        &rb_link, &rb_parent);
        if (pp) {
                for (i = start_bit; i < end_bit; i++)
                        set_bit(i, &pp->bitmap);
                /*
                 * If this partial page has become a full page, delete it.
                 */
                if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
                                PAGE_SIZE/IA32_PAGE_SIZE) {
                        __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
                }
                return 0;
        }

        /*
         * MAP_FIXED may lead to overlapping mmaps, in which case the
         * requested area may already be mapped as a full page.  So check
         * the vma before adding a new partial page.
         */
        if (flags & MAP_FIXED) {
                struct vm_area_struct *vma = find_vma(current->mm, pstart);
                if (vma && vma->vm_start <= pstart)
                        return 0;
        }

        /* allocate a new ia64_partial_page */
        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        pp->base = pstart;
        pp->bitmap = 0;
        for (i = start_bit; i < end_bit; i++)
                set_bit(i, &(pp->bitmap));
        pp->next = NULL;
        __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
        return 0;
}
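
/*
 * Worked example, assuming 16KB IA64 pages (four 4KB IA32 pages per
 * bitmap): __ia32_set_pp(0x5000, 0x7000, 0) computes pstart = 0x4000,
 * start_bit = 1 and end_bit = 3, and sets bits 1 and 2, leaving the
 * partial page with bitmap 0b0110.
 */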

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
        down_write(&current->mm->mmap_sem);
        if (flags & MAP_FIXED) {
                /*
                 * MAP_FIXED may lead to overlapping mmaps.  The complete
                 * IA64 pages of the new mapping need no partial-page
                 * tracking, so delete any old partial pages in that range.
                 */
                __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
        }

        if (end < PAGE_ALIGN(start)) {
                __ia32_set_pp(start, end, flags);
        } else {
                if (offset_in_page(start))
                        __ia32_set_pp(start, PAGE_ALIGN(start), flags);
                if (offset_in_page(end))
                        __ia32_set_pp(PAGE_START(end), end, flags);
        }
        up_write(&current->mm->mmap_sem);
}

/*
 * Clear the range between @start and @end in the bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * If the bitmap becomes empty as a result, free the partial page and
 * return 1; otherwise return 0.
 * If the partial page is not found in the list:
 *      if a vma covers the page, track the full page as a partial page;
 *      else return -ENOMEM.
 */
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
        struct ia64_partial_page *pp, *prev;
        struct rb_node ** rb_link, *rb_parent;
        unsigned int pstart, start_bit, end_bit, i;
        struct vm_area_struct *vma;

        pstart = PAGE_START(start);
        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
        if (end_bit == 0)
                end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
                                        &rb_link, &rb_parent);
        if (pp) {
                for (i = start_bit; i < end_bit; i++)
                        clear_bit(i, &pp->bitmap);
                if (pp->bitmap == 0) {
                        __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
                        return 1;
                }
                return 0;
        }

        vma = find_vma(current->mm, pstart);
        if (!vma || vma->vm_start > pstart) {
                return -ENOMEM;
        }

        /* allocate a new ia64_partial_page */
        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        pp->base = pstart;
        pp->bitmap = 0;
        for (i = 0; i < start_bit; i++)
                set_bit(i, &(pp->bitmap));
        for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
                set_bit(i, &(pp->bitmap));
        pp->next = NULL;
        __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
        return 0;
}

/*
 * Delete the partial pages between PAGE_ALIGN(start) and PAGE_START(end)
 * by calling __ia32_delete_pp_range(), then unset any remaining partial
 * pages by calling __ia32_unset_pp().
 * See __ia32_unset_pp() for the meaning of the return value.
 */
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
        unsigned int start = *startp, end = *endp;
        int ret = 0;

        down_write(&current->mm->mmap_sem);

        __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));

        if (end < PAGE_ALIGN(start)) {
                ret = __ia32_unset_pp(start, end);
                if (ret == 1) {
                        *startp = PAGE_START(start);
                        *endp = PAGE_ALIGN(end);
                }
                if (ret == 0) {
                        /* to shortcut sys_munmap() in sys32_munmap() */
                        *startp = PAGE_START(start);
                        *endp = PAGE_START(end);
                }
        } else {
                if (offset_in_page(start)) {
                        ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
                        if (ret == 1)
                                *startp = PAGE_START(start);
                        if (ret == 0)
                                *startp = PAGE_ALIGN(start);
                        if (ret < 0)
                                goto out;
                }
                if (offset_in_page(end)) {
                        ret = __ia32_unset_pp(PAGE_START(end), end);
                        if (ret == 1)
                                *endp = PAGE_ALIGN(end);
                        if (ret == 0)
                                *endp = PAGE_START(end);
                }
        }

 out:
        up_write(&current->mm->mmap_sem);
        return ret;
}

/*
 * Compare the range between @start and @end with the bitmap in the partial page.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
        struct ia64_partial_page *pp, *prev;
        struct rb_node ** rb_link, *rb_parent;
        unsigned int pstart, start_bit, end_bit, size;
        unsigned int first_bit, next_zero_bit;  /* the first range in bitmap */

        pstart = PAGE_START(start);

        pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
                                        &rb_link, &rb_parent);
        if (!pp)
                return 1;

        start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
        end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
        size = sizeof(pp->bitmap) * 8;
        first_bit = find_first_bit(&pp->bitmap, size);
        next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
        if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
                /* exceeds the first range in bitmap */
                return -ENOMEM;
        } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
                first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
                if ((next_zero_bit < first_bit) && (first_bit < size))
                        return 1;       /* has next range */
                else
                        return 0;       /* no next range */
        } else
                return 1;
}

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_compare_pp().
 *
 * Take this as an example: the range is the 1st and 2nd 4K page.
 * Return 0 if they fit the bitmap exactly, i.e. bitmap = 00000011;
 * Return 1 if the range doesn't cover the whole bitmap, e.g. bitmap = 00001111;
 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 *      bitmap = 00000101.
 */
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
        unsigned int start = *startp, end = *endp;
        int retval = 0;

        down_write(&current->mm->mmap_sem);

        if (end < PAGE_ALIGN(start)) {
                retval = __ia32_compare_pp(start, end);
                if (retval == 0) {
                        *startp = PAGE_START(start);
                        *endp = PAGE_ALIGN(end);
                }
        } else {
                if (offset_in_page(start)) {
                        retval = __ia32_compare_pp(start,
                                                   PAGE_ALIGN(start));
                        if (retval == 0)
                                *startp = PAGE_START(start);
                        if (retval < 0)
                                goto out;
                }
                if (offset_in_page(end)) {
                        retval = __ia32_compare_pp(PAGE_START(end), end);
                        if (retval == 0)
                                *endp = PAGE_ALIGN(end);
                }
        }

 out:
        up_write(&current->mm->mmap_sem);
        return retval;
}

static void
__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
{
        struct ia64_partial_page *pp = ppl->pp_head;

        while (pp) {
                struct ia64_partial_page *next = pp->next;
                kmem_cache_free(ia64_partial_page_cachep, pp);
                pp = next;
        }

        kfree(ppl);
}

void
ia32_drop_ia64_partial_page_list(struct task_struct *task)
{
        struct ia64_partial_page_list* ppl = task->thread.ppl;

        if (ppl && atomic_dec_and_test(&ppl->pp_count))
                __ia32_drop_pp_list(ppl);
}

/*
 * Copy current->thread.ppl to ppl (already initialized).
 */
static int
__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
{
        struct ia64_partial_page *pp, *tmp, *prev;
        struct rb_node **rb_link, *rb_parent;

        ppl->pp_head = NULL;
        ppl->pp_hint = NULL;
        ppl->ppl_rb = RB_ROOT;
        rb_link = &ppl->ppl_rb.rb_node;
        rb_parent = NULL;
        prev = NULL;

        for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
                tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM;
                *tmp = *pp;
                __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
                prev = tmp;
                rb_link = &tmp->pp_rb.rb_right;
                rb_parent = &tmp->pp_rb;
        }
        return 0;
}

int
ia32_copy_ia64_partial_page_list(struct task_struct *p,
                                unsigned long clone_flags)
{
        int retval = 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&current->thread.ppl->pp_count);
                p->thread.ppl = current->thread.ppl;
        } else {
                p->thread.ppl = ia32_init_pp_list();
                if (!p->thread.ppl)
                        return -ENOMEM;
                down_write(&current->mm->mmap_sem);
                {
                        retval = __ia32_copy_pp_list(p->thread.ppl);
                }
                up_write(&current->mm->mmap_sem);
        }

        return retval;
}

static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
              loff_t off)
{
        unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
        struct inode *inode;
        loff_t poff;

        end = start + len;
        pstart = PAGE_START(start);
        pend = PAGE_ALIGN(end);

        if (flags & MAP_FIXED) {
                ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
                if (start > pstart) {
                        if (flags & MAP_SHARED)
                                printk(KERN_INFO
                                       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
                                       current->comm, task_pid_nr(current), start);
                        ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
                                           off);
                        if (IS_ERR((void *) ret))
                                return ret;
                        pstart += PAGE_SIZE;
                        if (pstart >= pend)
                                goto out;       /* done */
                }
                if (end < pend) {
                        if (flags & MAP_SHARED)
                                printk(KERN_INFO
                                       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
                                       current->comm, task_pid_nr(current), end);
                        ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
                                           (off + len) - offset_in_page(end));
                        if (IS_ERR((void *) ret))
                                return ret;
                        pend -= PAGE_SIZE;
                        if (pstart >= pend)
                                goto out;       /* done */
                }
        } else {
                /*
                 * If a start address was specified, use it if the entire rounded out area
                 * is available.
                 */
                if (start && !pstart)
                        fudge = 1;      /* handle case of mapping to range (0,PAGE_SIZE) */
                tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
                if (tmp != pstart) {
                        pstart = tmp;
                        start = pstart + offset_in_page(off);   /* make start congruent with off */
                        end = start + len;
                        pend = PAGE_ALIGN(end);
                }
        }

        poff = off + (pstart - start);  /* note: (pstart - start) may be negative */
        is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
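
        /*
         * Worked example, assuming 16KB IA64 pages: for start = 0x5000
         * and off = 0x1000, pstart = 0x4000 and poff = 0x0, which is
         * IA64-page aligned, so the mapping is congruent: the file can
         * be mapped directly at pstart and the 4KB view at 0x5000 lines
         * up with file offset 0x1000.
         */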

        if ((flags & MAP_SHARED) && !is_congruent)
                printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
                       "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);

        DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
            is_congruent ? "congruent" : "not congruent", poff);

        down_write(&current->mm->mmap_sem);
        {
                if (!(flags & MAP_ANONYMOUS) && is_congruent)
                        ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
                else
                        ret = do_mmap(NULL, pstart, pend - pstart,
                                      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
                                      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
        }
        up_write(&current->mm->mmap_sem);

        if (IS_ERR((void *) ret))
                return ret;

        if (!is_congruent) {
                /* read the file contents */
                inode = file->f_path.dentry->d_inode;
                if (!inode->i_fop || !file->f_op->read
                    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
                        < 0))
                {
                        sys_munmap(pstart, pend - pstart);
                        return -EINVAL;
                }
                if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
                        return -EINVAL;
        }

        if (!(flags & MAP_FIXED))
                ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
out:
        return start;
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

static inline unsigned int
get_prot32 (unsigned int prot)
{
        if (prot & PROT_WRITE)
                /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
                prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
        else if (prot & (PROT_READ | PROT_EXEC))
                /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
                prot |= (PROT_READ | PROT_EXEC);

        return prot;
}
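
/*
 * Resulting mapping, as implied by the code above:
 *
 *      PROT_NONE   -> PROT_NONE
 *      PROT_READ   -> PROT_READ | PROT_EXEC
 *      PROT_EXEC   -> PROT_READ | PROT_EXEC
 *      PROT_WRITE  -> PROT_READ | PROT_WRITE | PROT_EXEC
 */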

unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
              loff_t offset)
{
        DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
            file, addr, len, prot, flags, offset);

        if (file && (!file->f_op || !file->f_op->mmap))
                return -ENODEV;

        len = IA32_PAGE_ALIGN(len);
        if (len == 0)
                return addr;

        if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
        {
                if (flags & MAP_FIXED)
                        return -ENOMEM;
                else
                        return -EINVAL;
        }

        if (OFFSET4K(offset))
                return -EINVAL;

        prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
        mutex_lock(&ia32_mmap_mutex);
        {
                addr = emulate_mmap(file, addr, len, prot, flags, offset);
        }
        mutex_unlock(&ia32_mmap_mutex);
#else
        down_write(&current->mm->mmap_sem);
        {
                addr = do_mmap(file, addr, len, prot, flags, offset);
        }
        up_write(&current->mm->mmap_sem);
#endif
        DBG("ia32_do_mmap: returning 0x%lx\n", addr);
        return addr;
}

/*
 * Linux/i386 originally couldn't handle more than 4 system call parameters,
 * so these system calls pass their parameters through a memory block.
 */

struct mmap_arg_struct {
        unsigned int addr;
        unsigned int len;
        unsigned int prot;
        unsigned int flags;
        unsigned int fd;
        unsigned int offset;
};
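
/*
 * Hypothetical sketch of the i386 user-side convention being emulated:
 * the six arguments are packed into one struct and a single pointer is
 * passed to the (old) mmap system call:
 *
 *      struct mmap_arg_struct a = { addr, len, prot, flags, fd, offset };
 *      ret = old_mmap(&a);
 */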

asmlinkage long
sys32_mmap (struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        struct file *file = NULL;
        unsigned long addr;
        int flags;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if (OFFSET4K(a.offset))
                return -EINVAL;

        flags = a.flags;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(a.fd);
                if (!file)
                        return -EBADF;
        }

        addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);

        if (file)
                fput(file);
        return addr;
}

asmlinkage long
sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
             unsigned int fd, unsigned int pgoff)
{
        struct file *file = NULL;
        unsigned long retval;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        return -EBADF;
        }

        retval = ia32_do_mmap(file, addr, len, prot, flags,
                              (unsigned long) pgoff << IA32_PAGE_SHIFT);

        if (file)
                fput(file);
        return retval;
}

asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
        unsigned int end = start + len;
        long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
        ret = sys_munmap(start, end - start);
#else
        if (OFFSET4K(start))
                return -EINVAL;

        end = IA32_PAGE_ALIGN(end);
        if (start >= end)
                return -EINVAL;

        ret = ia32_unset_pp(&start, &end);
        if (ret < 0)
                return ret;

        if (start >= end)
                return 0;

        mutex_lock(&ia32_mmap_mutex);
        ret = sys_munmap(start, end - start);
        mutex_unlock(&ia32_mmap_mutex);
#endif
        return ret;
}

#if PAGE_SHIFT > IA32_PAGE_SHIFT

/*
 * When mprotect()ing a partial page, we set the permission to the union of the old
 * settings and the new settings.  In other words, it's only possible to make access to a
 * partial page less restrictive.
 */
static long
mprotect_subpage (unsigned long address, int new_prot)
{
        int old_prot;
        struct vm_area_struct *vma;

        if (new_prot == PROT_NONE)
                return 0;               /* optimize case where nothing changes... */
        vma = find_vma(current->mm, address);
        old_prot = get_page_prot(vma, address);
        return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
        unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
        long retval = 0;
#endif

        prot = get_prot32(prot);

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
        return sys_mprotect(start, end - start, prot);
#else
        if (OFFSET4K(start))
                return -EINVAL;

        end = IA32_PAGE_ALIGN(end);
        if (end < start)
                return -EINVAL;

        retval = ia32_compare_pp(&start, &end);

        if (retval < 0)
                return retval;

        mutex_lock(&ia32_mmap_mutex);
        {
                if (offset_in_page(start)) {
                        /* start address is 4KB aligned but not page aligned. */
                        retval = mprotect_subpage(PAGE_START(start), prot);
                        if (retval < 0)
                                goto out;

                        start = PAGE_ALIGN(start);
                        if (start >= end)
                                goto out;       /* retval is already zero... */
                }

                if (offset_in_page(end)) {
                        /* end address is 4KB aligned but not page aligned. */
                        retval = mprotect_subpage(PAGE_START(end), prot);
                        if (retval < 0)
                                goto out;

                        end = PAGE_START(end);
                }
                retval = sys_mprotect(start, end - start, prot);
        }
  out:
        mutex_unlock(&ia32_mmap_mutex);
        return retval;
#endif
}

asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
                unsigned int flags, unsigned int new_addr)
{
        long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
        unsigned int old_end, new_end;

        if (OFFSET4K(addr))
                return -EINVAL;

        old_len = IA32_PAGE_ALIGN(old_len);
        new_len = IA32_PAGE_ALIGN(new_len);
        old_end = addr + old_len;
        new_end = addr + new_len;

        if (!new_len)
                return -EINVAL;

        if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
                return -EINVAL;

        if (old_len >= new_len) {
                ret = sys32_munmap(addr + new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        return ret;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        return ret;
                old_len = new_len;
        }

        addr = PAGE_START(addr);
        old_len = PAGE_ALIGN(old_end) - addr;
        new_len = PAGE_ALIGN(new_end) - addr;

        mutex_lock(&ia32_mmap_mutex);
        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
        mutex_unlock(&ia32_mmap_mutex);

        if ((ret >= 0) && (old_len < new_len)) {
                /* mremap expanded successfully */
                ia32_set_pp(old_end, new_end, flags);
        }
#endif
        return ret;
}

asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

struct sel_arg_struct {
        unsigned int n;
        unsigned int inp;
        unsigned int outp;
        unsigned int exp;
        unsigned int tvp;
};

asmlinkage long
sys32_old_select (struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
                                 compat_ptr(a.exp), compat_ptr(a.tvp));
}

#define SEMOP            1
#define SEMGET           2
#define SEMCTL           3
#define SEMTIMEDOP       4
#define MSGSND          11
#define MSGRCV          12
#define MSGGET          13
#define MSGCTL          14
#define SHMAT           21
#define SHMDT           22
#define SHMGET          23
#define SHMCTL          24
1139asmlinkage long
1140sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
1141{
1142        int version;
1143
1144        version = call >> 16; /* hack for backward compatibility */
1145        call &= 0xffff;
1146
1147        switch (call) {
1148              case SEMTIMEDOP:
1149                if (fifth)
1150                        return compat_sys_semtimedop(first, compat_ptr(ptr),
1151                                second, compat_ptr(fifth));
1152                /* else fall through for normal semop() */
1153              case SEMOP:
1154                /* struct sembuf is the same on 32 and 64bit :)) */
1155                return sys_semtimedop(first, compat_ptr(ptr), second,
1156                                      NULL);
1157              case SEMGET:
1158                return sys_semget(first, second, third);
1159              case SEMCTL:
1160                return compat_sys_semctl(first, second, third, compat_ptr(ptr));
1161
1162              case MSGSND:
1163                return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
1164              case MSGRCV:
1165                return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
1166              case MSGGET:
1167                return sys_msgget((key_t) first, second);
1168              case MSGCTL:
1169                return compat_sys_msgctl(first, second, compat_ptr(ptr));
1170
1171              case SHMAT:
1172                return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
1173                break;
1174              case SHMDT:
1175                return sys_shmdt(compat_ptr(ptr));
1176              case SHMGET:
1177                return sys_shmget(first, (unsigned)second, third);
1178              case SHMCTL:
1179                return compat_sys_shmctl(first, second, compat_ptr(ptr));
1180
1181              default:
1182                return -ENOSYS;
1183        }
1184        return -EINVAL;
1185}
1186
1187asmlinkage long
1188compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
1189                 struct compat_rusage *ru);
1190
1191asmlinkage long
1192sys32_waitpid (int pid, unsigned int *stat_addr, int options)
1193{
1194        return compat_sys_wait4(pid, stat_addr, options, NULL);
1195}

/*
 *  The order in which registers are stored in the ptrace regs structure
 */
#define PT_EBX  0
#define PT_ECX  1
#define PT_EDX  2
#define PT_ESI  3
#define PT_EDI  4
#define PT_EBP  5
#define PT_EAX  6
#define PT_DS   7
#define PT_ES   8
#define PT_FS   9
#define PT_GS   10
#define PT_ORIG_EAX 11
#define PT_EIP  12
#define PT_CS   13
#define PT_EFL  14
#define PT_UESP 15
#define PT_SS   16

static unsigned int
getreg (struct task_struct *child, int regno)
{
        struct pt_regs *child_regs;

        child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: return child_regs->r11;
              case PT_ECX: return child_regs->r9;
              case PT_EDX: return child_regs->r10;
              case PT_ESI: return child_regs->r14;
              case PT_EDI: return child_regs->r15;
              case PT_EBP: return child_regs->r13;
              case PT_EAX: return child_regs->r8;
              case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
              case PT_EIP: return child_regs->cr_iip;
              case PT_UESP: return child_regs->r12;
              case PT_EFL: return child->thread.eflag;
              case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
                return __USER_DS;
              case PT_CS: return __USER_CS;
              default:
                printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
                break;
        }
        return 0;
}

static void
putreg (struct task_struct *child, int regno, unsigned int value)
{
        struct pt_regs *child_regs;

        child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: child_regs->r11 = value; break;
              case PT_ECX: child_regs->r9 = value; break;
              case PT_EDX: child_regs->r10 = value; break;
              case PT_ESI: child_regs->r14 = value; break;
              case PT_EDI: child_regs->r15 = value; break;
              case PT_EBP: child_regs->r13 = value; break;
              case PT_EAX: child_regs->r8 = value; break;
              case PT_ORIG_EAX: child_regs->r1 = value; break;
              case PT_EIP: child_regs->cr_iip = value; break;
              case PT_UESP: child_regs->r12 = value; break;
              case PT_EFL: child->thread.eflag = value; break;
              case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
                if (value != __USER_DS)
                        printk(KERN_ERR
                               "ia32.putreg: attempt to set invalid segment register %d = %x\n",
                               regno, value);
                break;
              case PT_CS:
                if (value != __USER_CS)
                        printk(KERN_ERR
                               "ia32.putreg: attempt to set invalid segment register %d = %x\n",
                               regno, value);
                break;
              default:
                printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
                break;
        }
}

static void
put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
           struct switch_stack *swp, int tos)
{
        struct _fpreg_ia32 *f;
        char buf[32];

        f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
        if ((regno += tos) >= 8)
                regno -= 8;
        switch (regno) {
              case 0:
                ia64f2ia32f(f, &ptp->f8);
                break;
              case 1:
                ia64f2ia32f(f, &ptp->f9);
                break;
              case 2:
                ia64f2ia32f(f, &ptp->f10);
                break;
              case 3:
                ia64f2ia32f(f, &ptp->f11);
                break;
              case 4:
              case 5:
              case 6:
              case 7:
                ia64f2ia32f(f, &swp->f12 + (regno - 4));
                break;
        }
        copy_to_user(reg, f, sizeof(*reg));
}

static void
get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
           struct switch_stack *swp, int tos)
{

        if ((regno += tos) >= 8)
                regno -= 8;
        switch (regno) {
              case 0:
                copy_from_user(&ptp->f8, reg, sizeof(*reg));
                break;
              case 1:
                copy_from_user(&ptp->f9, reg, sizeof(*reg));
                break;
              case 2:
                copy_from_user(&ptp->f10, reg, sizeof(*reg));
                break;
              case 3:
                copy_from_user(&ptp->f11, reg, sizeof(*reg));
                break;
              case 4:
              case 5:
              case 6:
              case 7:
                copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
                break;
        }
        return;
}

int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}
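
/*
 * As the field extractions above imply, the low half of the ia64 FSR
 * mirrors x87 state: bits 15:0 hold the status word (with the
 * top-of-stack in bits 13:11) and bits 31:16 hold the tag word.
 */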

static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
        return 0;
}

int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned long mxcsr = 0;
        unsigned long num128[2];

        if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
                return -EFAULT;

        __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
        __put_user(tsk->thread.fsr & 0xffff, &save->swd);
        __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
        __put_user(tsk->thread.fir, &save->fip);
        __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
        __put_user(tsk->thread.fdr, &save->foo);
        __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
        __put_user(mxcsr & 0xffff, &save->mxcsr);
        for (i = 0; i < 8; i++) {
                memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
                memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
                copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
        }
        return 0;
}

static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
        struct switch_stack *swp;
        struct pt_regs *ptp;
        int i, tos;
        unsigned int fsrlo, fsrhi, num32;
        int mxcsr;
        unsigned long num64;
        unsigned long num128[2];

        if (!access_ok(VERIFY_READ, save, sizeof(*save)))
                return -EFAULT;

        __get_user(num32, (unsigned int __user *)&save->cwd);
        tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
        __get_user(fsrlo, (unsigned int __user *)&save->swd);
        __get_user(fsrhi, (unsigned int __user *)&save->twd);
        num32 = (fsrhi << 16) | fsrlo;
        tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->fip);
        tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
        __get_user(num32, (unsigned int __user *)&save->foo);
        tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

        /*
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);

        __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
        num64 = mxcsr & 0xff10;
        tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
        num64 = mxcsr & 0x3f;
        tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);

        for (i = 0; i < 8; i++) {
                copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
                memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
                memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
        }
        return 0;
}
1493
1494long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1495        compat_ulong_t caddr, compat_ulong_t cdata)
1496{
1497        unsigned long addr = caddr;
1498        unsigned long data = cdata;
1499        unsigned int tmp;
1500        long i, ret;
1501
1502        switch (request) {
1503              case PTRACE_PEEKUSR:      /* read word at addr in USER area */
1504                ret = -EIO;
1505                if ((addr & 3) || addr > 17*sizeof(int))
1506                        break;
1507
1508                tmp = getreg(child, addr);
1509                if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
1510                        ret = 0;
1511                break;
1512
1513              case PTRACE_POKEUSR:      /* write word at addr in USER area */
1514                ret = -EIO;
1515                if ((addr & 3) || addr > 17*sizeof(int))
1516                        break;
1517
1518                putreg(child, addr, data);
1519                ret = 0;
1520                break;
1521
1522              case IA32_PTRACE_GETREGS:
1523                if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
1524                        ret = -EIO;
1525                        break;
1526                }
1527                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
1528                        put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
1529                        data += sizeof(int);
1530                }
1531                ret = 0;
1532                break;
1533
1534              case IA32_PTRACE_SETREGS:
1535                if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
1536                        ret = -EIO;
1537                        break;
1538                }
1539                for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
1540                        get_user(tmp, (unsigned int __user *) compat_ptr(data));
1541                        putreg(child, i, tmp);
1542                        data += sizeof(int);
1543                }
1544                ret = 0;
1545                break;
1546
1547              case IA32_PTRACE_GETFPREGS:
1548                ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1549                                        compat_ptr(data));
1550                break;
1551
1552              case IA32_PTRACE_GETFPXREGS:
1553                ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1554                                         compat_ptr(data));
1555                break;
1556
1557              case IA32_PTRACE_SETFPREGS:
1558                ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1559                                           compat_ptr(data));
1560                break;
1561
1562              case IA32_PTRACE_SETFPXREGS:
1563                ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1564                                            compat_ptr(data));
1565                break;
1566
1567              default:
1568                return compat_ptrace_request(child, request, caddr, cdata);
1569        }
1570        return ret;
1571}
1572
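    /*
     * sigaltstack() conversion: the ia32 stack_t uses 32-bit fields, so
     * translate to and from the native 64-bit stack_t around
     * do_sigaltstack().
     */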
1573typedef struct {
1574        unsigned int    ss_sp;
1575        unsigned int    ss_flags;
1576        unsigned int    ss_size;
1577} ia32_stack_t;
1578
1579asmlinkage long
1580sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
1581                   long arg2, long arg3, long arg4, long arg5, long arg6,
1582                   long arg7, struct pt_regs pt)
1583{
1584        stack_t uss, uoss;
1585        ia32_stack_t buf32;
1586        int ret;
1587        mm_segment_t old_fs = get_fs();
1588
1589        if (uss32) {
1590                if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
1591                        return -EFAULT;
1592                uss.ss_sp = (void __user *) (long) buf32.ss_sp;
1593                uss.ss_flags = buf32.ss_flags;
1594                /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
1595                   check and set it to the user requested value later */
1596                if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
1597                        ret = -ENOMEM;
1598                        goto out;
1599                }
1600                uss.ss_size = MINSIGSTKSZ;
1601        }
1602        set_fs(KERNEL_DS);
1603        ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
1604                             (stack_t __user *) &uoss, pt.r12);
1605        if (!ret && uss32)
                    current->sas_ss_size = buf32.ss_size;
1606        set_fs(old_fs);
1607out:
1608        if (ret < 0)
1609                return ret;
1610        if (uoss32) {
1611                buf32.ss_sp = (unsigned int) (unsigned long) uoss.ss_sp;
1612                buf32.ss_flags = uoss.ss_flags;
1613                buf32.ss_size = uoss.ss_size;
1614                if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
1615                        return -EFAULT;
1616        }
1617        return ret;
1618}
1619
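    /*
     * ia32 assumes 4K page granularity, while the native ia64 page size is
     * larger; round "start" down to a native page boundary and grow "len"
     * by the difference before calling sys_msync().  With 16K kernel pages,
     * for example, msync(0x4000f000, 0x1000, ...) becomes
     * msync(0x4000c000, 0x4000, ...).
     */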
1620asmlinkage int
1621sys32_msync (unsigned int start, unsigned int len, int flags)
1622{
1623        unsigned int addr;
1624
1625        if (OFFSET4K(start))
1626                return -EINVAL;
1627        addr = PAGE_START(start);
1628        return sys_msync(addr, len + (start - addr), flags);
1629}
1630
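    /*
     * 32-bit layout of struct __sysctl_args: each pointer member is a
     * 32-bit user address that must be widened with compat_ptr().
     */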
1631struct sysctl32 {
1632        unsigned int    name;
1633        int             nlen;
1634        unsigned int    oldval;
1635        unsigned int    oldlenp;
1636        unsigned int    newval;
1637        unsigned int    newlen;
1638        unsigned int    __unused[4];
1639};
1640
1641#ifdef CONFIG_SYSCTL_SYSCALL
1642asmlinkage long
1643sys32_sysctl (struct sysctl32 __user *args)
1644{
1645        struct sysctl32 a32;
1646        mm_segment_t old_fs = get_fs ();
1647        void __user *oldvalp, *newvalp;
1648        size_t oldlen;
1649        int __user *namep;
1650        long ret;
1651
1652        if (copy_from_user(&a32, args, sizeof(a32)))
1653                return -EFAULT;
1654
1655        /*
1656         * We need to pre-validate these: address checking is disabled around the
1657         * do_sysctl() call because of OLDLEN, so we can't run the risk of the user
1658         * specifying bad addresses here.  Well, since we're dealing with 32-bit
1659         * addresses, we KNOW that access_ok() will always succeed, so this is an
1660         * expensive NOP, but so what...
1661         */
1662        namep = (int __user *) compat_ptr(a32.name);
1663        oldvalp = compat_ptr(a32.oldval);
1664        newvalp = compat_ptr(a32.newval);
1665
1666        if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1667            || !access_ok(VERIFY_WRITE, namep, 0)
1668            || !access_ok(VERIFY_WRITE, oldvalp, 0)
1669            || !access_ok(VERIFY_WRITE, newvalp, 0))
1670                return -EFAULT;
1671
1672        set_fs(KERNEL_DS);
1673        lock_kernel();
1674        ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
1675                        newvalp, (size_t) a32.newlen);
1676        unlock_kernel();
1677        set_fs(old_fs);
1678
1679        if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1680                return -EFAULT;
1681
1682        return ret;
1683}
1684#endif
1685
1686asmlinkage long
1687sys32_newuname (struct new_utsname __user *name)
1688{
1689        int ret = sys_newuname(name);
1690
1691        if (!ret)
1692                if (copy_to_user(name->machine, "i686\0\0\0", 8))
1693                        ret = -EFAULT;
1694        return ret;
1695}
1696
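    /*
     * The *16() calls below follow a common pattern: run the native syscall
     * under set_fs(KERNEL_DS) so that pointers to kernel stack variables
     * pass the user-access checks, then copy the results out to the legacy
     * 16-bit user fields.
     */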
1697asmlinkage long
1698sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
1699{
1700        uid_t a, b, c;
1701        int ret;
1702        mm_segment_t old_fs = get_fs();
1703
1704        set_fs(KERNEL_DS);
1705        ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
1706        set_fs(old_fs);
1707
1708        if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
1709                return -EFAULT;
1710        return ret;
1711}
1712
1713asmlinkage long
1714sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
1715{
1716        gid_t a, b, c;
1717        int ret;
1718        mm_segment_t old_fs = get_fs();
1719
1720        set_fs(KERNEL_DS);
1721        ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
1722        set_fs(old_fs);
1723
1724        if (ret)
1725                return ret;
1726
1727        return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
1728}
1729
1730asmlinkage long
1731sys32_lseek (unsigned int fd, int offset, unsigned int whence)
1732{
1733        /* Sign-extension of "offset" is important here... */
1734        return sys_lseek(fd, offset, whence);
1735}
1736
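    /*
     * Legacy 16-bit group list helpers: the old ia32 ABI passes each gid as
     * a 16-bit value, so convert the group_info element by element.
     */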
1737static int
1738groups16_to_user(short __user *grouplist, struct group_info *group_info)
1739{
1740        int i;
1741        short group;
1742
1743        for (i = 0; i < group_info->ngroups; i++) {
1744                group = (short)GROUP_AT(group_info, i);
1745                if (put_user(group, grouplist+i))
1746                        return -EFAULT;
1747        }
1748
1749        return 0;
1750}
1751
1752static int
1753groups16_from_user(struct group_info *group_info, short __user *grouplist)
1754{
1755        int i;
1756        short group;
1757
1758        for (i = 0; i < group_info->ngroups; i++) {
1759                if (get_user(group, grouplist+i))
1760                        return  -EFAULT;
1761                GROUP_AT(group_info, i) = (gid_t)group;
1762        }
1763
1764        return 0;
1765}
1766
1767asmlinkage long
1768sys32_getgroups16 (int gidsetsize, short __user *grouplist)
1769{
1770        const struct cred *cred = current_cred();
1771        int i;
1772
1773        if (gidsetsize < 0)
1774                return -EINVAL;
1775
1776        i = cred->group_info->ngroups;
1777        if (gidsetsize) {
1778                if (i > gidsetsize) {
1779                        i = -EINVAL;
1780                        goto out;
1781                }
1782                if (groups16_to_user(grouplist, cred->group_info)) {
1783                        i = -EFAULT;
1784                        goto out;
1785                }
1786        }
1787out:
1788        return i;
1789}
1790
1791asmlinkage long
1792sys32_setgroups16 (int gidsetsize, short __user *grouplist)
1793{
1794        struct group_info *group_info;
1795        int retval;
1796
1797        if (!capable(CAP_SETGID))
1798                return -EPERM;
1799        if ((unsigned)gidsetsize > NGROUPS_MAX)
1800                return -EINVAL;
1801
1802        group_info = groups_alloc(gidsetsize);
1803        if (!group_info)
1804                return -ENOMEM;
1805        retval = groups16_from_user(group_info, grouplist);
1806        if (retval) {
1807                put_group_info(group_info);
1808                return retval;
1809        }
1810
1811        retval = set_current_groups(group_info);
1812        put_group_info(group_info);
1813
1814        return retval;
1815}
1816
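    /*
     * Several entry points below receive a 64-bit quantity split across two
     * 32-bit arguments (low word first); reassemble it as
     * ((unsigned long) hi << 32) | lo before calling the native syscall.
     */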
1817asmlinkage long
1818sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
1819{
1820        return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
1821}
1822
1823asmlinkage long
1824sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
1825{
1826        return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
1827}
1828
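    /*
     * Fill in an ia32 struct stat64.  64-bit members (dev, ino, size) are
     * written as lo/hi 32-bit halves to match the i386 layout.
     */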
1829static int
1830putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
1831{
1832        int err;
1833        u64 hdev;
1834
1835        if (clear_user(ubuf, sizeof(*ubuf)))
1836                return -EFAULT;
1837
1838        hdev = huge_encode_dev(kbuf->dev);
1839        err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
1840        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
1841        err |= __put_user(kbuf->ino, &ubuf->__st_ino);
1842        err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
1843        err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
1844        err |= __put_user(kbuf->mode, &ubuf->st_mode);
1845        err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
1846        err |= __put_user(kbuf->uid, &ubuf->st_uid);
1847        err |= __put_user(kbuf->gid, &ubuf->st_gid);
1848        hdev = huge_encode_dev(kbuf->rdev);
1849        err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
1850        err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
1851        err |= __put_user(kbuf->size, &ubuf->st_size_lo);
1852        err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
1853        err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
1854        err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
1855        err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
1856        err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
1857        err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
1858        err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
1859        err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
1860        err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
1861        return err;
1862}
1863
1864asmlinkage long
1865sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
1866{
1867        struct kstat s;
1868        long ret = vfs_stat(filename, &s);
1869        if (!ret)
1870                ret = putstat64(statbuf, &s);
1871        return ret;
1872}
1873
1874asmlinkage long
1875sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
1876{
1877        struct kstat s;
1878        long ret = vfs_lstat(filename, &s);
1879        if (!ret)
1880                ret = putstat64(statbuf, &s);
1881        return ret;
1882}
1883
1884asmlinkage long
1885sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
1886{
1887        struct kstat s;
1888        long ret = vfs_fstat(fd, &s);
1889        if (!ret)
1890                ret = putstat64(statbuf, &s);
1891        return ret;
1892}
1893
1894asmlinkage long
1895sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
1896{
1897        mm_segment_t old_fs = get_fs();
1898        struct timespec t;
1899        long ret;
1900
1901        set_fs(KERNEL_DS);
1902        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
1903        set_fs(old_fs);
1904        if (put_compat_timespec(&t, interval))
1905                return -EFAULT;
1906        return ret;
1907}
1908
1909asmlinkage long
1910sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
1911{
1912        return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
1913}
1914
1915asmlinkage long
1916sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
1917{
1918        return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
1919}
1920
1921asmlinkage long
1922sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
1923{
1924        mm_segment_t old_fs = get_fs();
1925        long ret;
1926        off_t of;
1927
1928        if (offset && get_user(of, offset))
1929                return -EFAULT;
1930
1931        set_fs(KERNEL_DS);
1932        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
1933        set_fs(old_fs);
1934
1935        if (offset && put_user(of, offset))
1936                return -EFAULT;
1937
1938        return ret;
1939}
1940
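    /*
     * personality(): a PER_LINUX32 task that asks for PER_LINUX stays
     * PER_LINUX32, and PER_LINUX32 is reported back to it as PER_LINUX, so
     * the emulated 32-bit environment remains self-consistent.
     */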
1941asmlinkage long
1942sys32_personality (unsigned int personality)
1943{
1944        long ret;
1945
1946        if (current->personality == PER_LINUX32 && personality == PER_LINUX)
1947                personality = PER_LINUX32;
1948        ret = sys_personality(personality);
1949        if (ret == PER_LINUX32)
1950                ret = PER_LINUX;
1951        return ret;
1952}
1953
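    /*
     * brk() fixup: ia32 assumes 4K pages, so when the break shrinks, the
     * tail of the last (larger) native page stays mapped; zero it so stale
     * data cannot reappear if the break later grows again.
     */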
1954asmlinkage unsigned long
1955sys32_brk (unsigned int brk)
1956{
1957        unsigned long ret, obrk;
1958        struct mm_struct *mm = current->mm;
1959
1960        obrk = mm->brk;
1961        ret = sys_brk(brk);
1962        if (ret < obrk)
1963                clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
1964        return ret;
1965}
1966
1967/* Structure for ia32 emulation on ia64 */
1968struct epoll_event32
1969{
1970        u32 events;
1971        u32 data[2];
1972};
1973
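    /*
     * The i386 epoll_event is unpadded, so its 64-bit "data" member arrives
     * as two u32 halves; repack them into a native struct epoll_event
     * before calling sys_epoll_ctl().
     */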
1974asmlinkage long
1975sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
1976{
1977        mm_segment_t old_fs = get_fs();
1978        struct epoll_event event64;
1979        int error;
1980        u32 data_halfword;
1981
1982        if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
1983                return -EFAULT;
1984
1985        __get_user(event64.events, &event->events);
1986        __get_user(data_halfword, &event->data[0]);
1987        event64.data = data_halfword;
1988        __get_user(data_halfword, &event->data[1]);
1989        event64.data |= (u64)data_halfword << 32;
1990
1991        set_fs(KERNEL_DS);
1992        error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
1993        set_fs(old_fs);
1994
1995        return error;
1996}
1997
1998asmlinkage long
1999sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
2000                 int timeout)
2001{
2002        struct epoll_event *events64 = NULL;
2003        mm_segment_t old_fs = get_fs();
2004        int numevents, size;
2005        int evt_idx;
2006        int do_free_pages = 0;
2007
2008        if (maxevents <= 0) {
2009                return -EINVAL;
2010        }
2011
2012        /* Verify that the area passed by the user is writeable */
2013        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
2014                return -EFAULT;
2015
2016        /*
2017         * Allocate space for the intermediate copy.  If the space needed
2018         * is large enough to cause kmalloc to fail, then try again with
2019         * __get_free_pages.
2020         */
2021        size = maxevents * sizeof(struct epoll_event);
2022        events64 = kmalloc(size, GFP_KERNEL);
2023        if (events64 == NULL) {
2024                events64 = (struct epoll_event *)
2025                                __get_free_pages(GFP_KERNEL, get_order(size));
2026                if (events64 == NULL)
2027                        return -ENOMEM;
2028                do_free_pages = 1;
2029        }
2030
2031        /* Do the system call */
2032        set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
2033        numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
2034                                   maxevents, timeout);
2035        set_fs(old_fs);
2036
2037        /* Don't modify userspace memory if we're returning an error */
2038        if (numevents > 0) {
2039                /* Translate the 64-bit structures back into the 32-bit
2040                   structures */
2041                for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
2042                        __put_user(events64[evt_idx].events,
2043                                   &events[evt_idx].events);
2044                        __put_user((u32)events64[evt_idx].data,
2045                                   &events[evt_idx].data[0]);
2046                        __put_user((u32)(events64[evt_idx].data >> 32),
2047                                   &events[evt_idx].data[1]);
2048                }
2049        }
2050
2051        if (do_free_pages)
2052                free_pages((unsigned long) events64, get_order(size));
2053        else
2054                kfree(events64);
2055        return numevents;
2056}
2057
2058/*
2059 * Get a yet unused TLS descriptor index.
2060 */
2061static int
2062get_free_idx (void)
2063{
2064        struct thread_struct *t = &current->thread;
2065        int idx;
2066
2067        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
2068                if (desc_empty(t->tls_array + idx))
2069                        return idx + GDT_ENTRY_TLS_MIN;
2070        return -ESRCH;
2071}
2072
2073static void set_tls_desc(struct task_struct *p, int idx,
2074                const struct ia32_user_desc *info, int n)
2075{
2076        struct thread_struct *t = &p->thread;
2077        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
2078        int cpu;
2079
2080        /*
2081         * We must not get preempted while modifying the TLS.
2082         */
2083        cpu = get_cpu();
2084
2085        while (n-- > 0) {
2086                if (LDT_empty(info)) {
2087                        desc->a = 0;
2088                        desc->b = 0;
2089                } else {
2090                        desc->a = LDT_entry_a(info);
2091                        desc->b = LDT_entry_b(info);
2092                }
2093
2094                ++info;
2095                ++desc;
2096        }
2097
2098        if (t == &current->thread)
2099                load_TLS(t, cpu);
2100
2101        put_cpu();
2102}
2103
2104/*
2105 * Set a given TLS descriptor:
2106 */
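    /*
     * An ia32 task typically reaches this via set_thread_area(2); purely as
     * an illustration (thread libraries do the equivalent internally), with
     * entry_number == -1 requesting a free slot that is written back:
     *
     *      struct user_desc d = {
     *              .entry_number   = -1,
     *              .base_addr      = tls_base,
     *              .limit          = 0xfffff,
     *              .seg_32bit      = 1,
     *              .limit_in_pages = 1,
     *              .useable        = 1,
     *      };
     *      syscall(__NR_set_thread_area, &d);
     */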
2107asmlinkage int
2108sys32_set_thread_area (struct ia32_user_desc __user *u_info)
2109{
2110        struct ia32_user_desc info;
2111        int idx;
2112
2113        if (copy_from_user(&info, u_info, sizeof(info)))
2114                return -EFAULT;
2115        idx = info.entry_number;
2116
2117        /*
2118         * index -1 means the kernel should try to find and allocate an empty descriptor:
2119         */
2120        if (idx == -1) {
2121                idx = get_free_idx();
2122                if (idx < 0)
2123                        return idx;
2124                if (put_user(idx, &u_info->entry_number))
2125                        return -EFAULT;
2126        }
2127
2128        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2129                return -EINVAL;
2130
2131        set_tls_desc(current, idx, &info, 1);
2132        return 0;
2133}
2134
2135/*
2136 * Get the current Thread-Local Storage area:
2137 */
2138
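    /*
     * The GET_* macros below decode an x86 segment descriptor, whose base
     * and limit are scattered across the two 32-bit words "a" and "b":
     * base[15:0] lives in a[31:16], base[23:16] in b[7:0], base[31:24] in
     * b[31:24]; limit[15:0] is a[15:0] and limit[19:16] is b[19:16].
     */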
2139#define GET_BASE(desc) (                        \
2140        (((desc)->a >> 16) & 0x0000ffff) |      \
2141        (((desc)->b << 16) & 0x00ff0000) |      \
2142        ( (desc)->b        & 0xff000000)   )
2143
2144#define GET_LIMIT(desc) (                       \
2145        ((desc)->a & 0x0ffff) |                 \
2146         ((desc)->b & 0xf0000) )
2147
2148#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
2149#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
2150#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
2151#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
2152#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
2153#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)
2154
2155static void fill_user_desc(struct ia32_user_desc *info, int idx,
2156                const struct desc_struct *desc)
2157{
2158        info->entry_number = idx;
2159        info->base_addr = GET_BASE(desc);
2160        info->limit = GET_LIMIT(desc);
2161        info->seg_32bit = GET_32BIT(desc);
2162        info->contents = GET_CONTENTS(desc);
2163        info->read_exec_only = !GET_WRITABLE(desc);
2164        info->limit_in_pages = GET_LIMIT_PAGES(desc);
2165        info->seg_not_present = !GET_PRESENT(desc);
2166        info->useable = GET_USEABLE(desc);
2167}
2168
2169asmlinkage int
2170sys32_get_thread_area (struct ia32_user_desc __user *u_info)
2171{
2172        struct ia32_user_desc info;
2173        struct desc_struct *desc;
2174        int idx;
2175
2176        if (get_user(idx, &u_info->entry_number))
2177                return -EFAULT;
2178        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2179                return -EINVAL;
2180
2181        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
2182        fill_user_desc(&info, idx, desc);
2183
2184        if (copy_to_user(u_info, &info, sizeof(info)))
2185                return -EFAULT;
2186        return 0;
2187}
2188
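    /*
     * The fp regset get/set work must run inside the ia64 unwinder (the
     * ia32 FP state lives partly in the target's switch_stack), so the
     * arguments are bundled up and carried through the unwinder callback.
     */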
2189struct regset_get {
2190        void *kbuf;
2191        void __user *ubuf;
2192};
2193
2194struct regset_set {
2195        const void *kbuf;
2196        const void __user *ubuf;
2197};
2198
2199struct regset_getset {
2200        struct task_struct *target;
2201        const struct user_regset *regset;
2202        union {
2203                struct regset_get get;
2204                struct regset_set set;
2205        } u;
2206        unsigned int pos;
2207        unsigned int count;
2208        int ret;
2209};
2210
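    /*
     * Map the ia32 i387 control area onto the ia64 FP state: fcr holds the
     * control word, fsr the status and tag words, and fir/fdr the
     * instruction and data pointers.  "regno" is the byte offset into the
     * 7-int control area of struct ia32_user_i387_struct.
     */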
2211static void getfpreg(struct task_struct *task, int regno, int *val)
2212{
2213        switch (regno / sizeof(int)) {
2214        case 0:
2215                *val = task->thread.fcr & 0xffff;
2216                break;
2217        case 1:
2218                *val = task->thread.fsr & 0xffff;
2219                break;
2220        case 2:
2221                *val = (task->thread.fsr>>16) & 0xffff;
2222                break;
2223        case 3:
2224                *val = task->thread.fir;
2225                break;
2226        case 4:
2227                *val = (task->thread.fir>>32) & 0xffff;
2228                break;
2229        case 5:
2230                *val = task->thread.fdr;
2231                break;
2232        case 6:
2233                *val = (task->thread.fdr >> 32) & 0xffff;
2234                break;
2235        }
2236}
2237
2238static void setfpreg(struct task_struct *task, int regno, int val)
2239{
2240        switch (regno / sizeof(int)) {
2241        case 0:
2242                task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2243                        | (val & 0x1f3f);
2244                break;
2245        case 1:
2246                task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
2247                break;
2248        case 2:
2249                task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2250                        | (val << 16);
2251                break;
2252        case 3:
2253                task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
2254                break;
2255        case 5:
2256                task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
2257                break;
2258        }
2259}
2260
2261static void access_fpreg_ia32(int regno, void *reg,
2262                struct pt_regs *pt, struct switch_stack *sw,
2263                int tos, int write)
2264{
2265        void *f;
2266
2267        if ((regno += tos) >= 8)
2268                regno -= 8;
2269        if (regno < 4)
2270                f = &pt->f8 + regno;
2271        else if (regno <= 7)
2272                f = &sw->f12 + (regno - 4);
2273        else {
2274                printk(KERN_ERR "access_fpreg_ia32: regno must be less than 8\n");
2275                return;
2276        }
2277
2278        if (write)
2279                memcpy(f, reg, sizeof(struct _fpreg_ia32));
2280        else
2281                memcpy(reg, f, sizeof(struct _fpreg_ia32));
2282}
2283
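    /*
     * Unwinder callbacks for the i387 regset: the first 7 ints are the
     * control/status area handled by getfpreg()/setfpreg(); the rest are
     * the eight 10-byte st(i) registers moved via access_fpreg_ia32().
     */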
2284static void do_fpregs_get(struct unw_frame_info *info, void *arg)
2285{
2286        struct regset_getset *dst = arg;
2287        struct task_struct *task = dst->target;
2288        struct pt_regs *pt;
2289        int start, end, tos;
2290        char buf[80];
2291
2292        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2293                return;
2294        if (dst->pos < 7 * sizeof(int)) {
2295                end = min((dst->pos + dst->count),
2296                        (unsigned int)(7 * sizeof(int)));
2297                for (start = dst->pos; start < end; start += sizeof(int))
2298                        getfpreg(task, start, (int *)(buf + start));
2299                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2300                                &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2301                                0, 7 * sizeof(int));
2302                if (dst->ret || dst->count == 0)
2303                        return;
2304        }
2305        if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2306                pt = task_pt_regs(task);
2307                tos = (task->thread.fsr >> 11) & 7;
2308                end = min(dst->pos + dst->count,
2309                        (unsigned int)(sizeof(struct ia32_user_i387_struct)));
2310                start = (dst->pos - 7 * sizeof(int)) /
2311                        sizeof(struct _fpreg_ia32);
2312                end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2313                for (; start < end; start++)
2314                        access_fpreg_ia32(start,
2315                                (struct _fpreg_ia32 *)buf + start,
2316                                pt, info->sw, tos, 0);
2317                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2318                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2319                                buf, 7 * sizeof(int),
2320                                sizeof(struct ia32_user_i387_struct));
2321                if (dst->ret || dst->count == 0)
2322                        return;
2323        }
2324}
2325
2326static void do_fpregs_set(struct unw_frame_info *info, void *arg)
2327{
2328        struct regset_getset *dst = arg;
2329        struct task_struct *task = dst->target;
2330        struct pt_regs *pt;
2331        char buf[80];
2332        int end, start, tos;
2333
2334        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2335                return;
2336
2337        if (dst->pos < 7 * sizeof(int)) {
2338                start = dst->pos;
2339                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2340                                &dst->u.set.kbuf, &dst->u.set.ubuf, buf,
2341                                0, 7 * sizeof(int));
2342                if (dst->ret)
2343                        return;
2344                for (; start < dst->pos; start += sizeof(int))
2345                        setfpreg(task, start, *((int *)(buf + start)));
2346                if (dst->count == 0)
2347                        return;
2348        }
2349        if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2350                start = (dst->pos - 7 * sizeof(int)) /
2351                        sizeof(struct _fpreg_ia32);
2352                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2353                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2354                                buf, 7 * sizeof(int),
2355                                sizeof(struct ia32_user_i387_struct));
2356                if (dst->ret)
2357                        return;
2358                pt = task_pt_regs(task);
2359                tos = (task->thread.fsr >> 11) & 7;
2360                end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2361                for (; start < end; start++)
2362                        access_fpreg_ia32(start,
2363                                (struct _fpreg_ia32 *)buf + start,
2364                                pt, info->sw, tos, 1);
2365                if (dst->count == 0)
2366                        return;
2367        }
2368}
2369
2370#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
2371static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
2372{
2373        int min_val;
2374
2375        min_val = min(end, OFFSET(fop));
2376        while (start < min_val) {
2377                if (start == OFFSET(cwd))
2378                        *((short *)buf) = task->thread.fcr & 0xffff;
2379                else if (start == OFFSET(swd))
2380                        *((short *)buf) = task->thread.fsr & 0xffff;
2381                else if (start == OFFSET(twd))
2382                        *((short *)buf) = (task->thread.fsr>>16) & 0xffff;
2383                buf += 2;
2384                start += 2;
2385        }
2386        /* skip fop element */
2387        if (start == OFFSET(fop)) {
2388                start += 2;
2389                buf += 2;
2390        }
2391        while (start < end) {
2392                if (start == OFFSET(fip))
2393                        *((int *)buf) = task->thread.fir;
2394                else if (start == OFFSET(fcs))
2395                        *((int *)buf) = (task->thread.fir>>32) & 0xffff;
2396                else if (start == OFFSET(foo))
2397                        *((int *)buf) = task->thread.fdr;
2398                else if (start == OFFSET(fos))
2399                        *((int *)buf) = (task->thread.fdr>>32) & 0xffff;
2400                else if (start == OFFSET(mxcsr))
2401                        *((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
2402                                         | ((task->thread.fsr>>32) & 0x3f);
2403                buf += 4;
2404                start += 4;
2405        }
2406}
2407
2408static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
2409{
2410        int min_val, num32;
2411        short num;
2412        unsigned long num64;
2413
2414        min_val = min(end, OFFSET(fop));
2415        while (start < min_val) {
2416                num = *((short *)buf);
2417                if (start == OFFSET(cwd)) {
2418                        task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2419                                                | (num & 0x1f3f);
2420                } else if (start == OFFSET(swd)) {
2421                        task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
2422                } else if (start == OFFSET(twd)) {
2423                        task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2424                                | (((int)num) << 16);
2425                }
2426                buf += 2;
2427                start += 2;
2428        }
2429        /* skip fop element */
2430        if (start == OFFSET(fop)) {
2431                start += 2;
2432                buf += 2;
2433        }
2434        while (start < end) {
2435                num32 = *((int *)buf);
2436                if (start == OFFSET(fip))
2437                        task->thread.fir = (task->thread.fir & (~0xffffffff))
2438                                                 | num32;
2439                else if (start == OFFSET(foo))
2440                        task->thread.fdr = (task->thread.fdr & (~0xffffffff))
2441                                                 | num32;
2442                else if (start == OFFSET(mxcsr)) {
2443                        num64 = num32 & 0xff10;
2444                        task->thread.fcr = (task->thread.fcr &
2445                                (~0xff1000000000UL)) | (num64<<32);
2446                        num64 = num32 & 0x3f;
2447                        task->thread.fsr = (task->thread.fsr &
2448                                (~0x3f00000000UL)) | (num64<<32);
2449                }
2450                buf += 4;
2451                start += 4;
2452        }
2453}
2454
2455static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
2456{
2457        struct regset_getset *dst = arg;
2458        struct task_struct *task = dst->target;
2459        struct pt_regs *pt;
2460        char buf[128];
2461        int start, end, tos;
2462
2463        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2464                return;
2465        if (dst->pos < OFFSET(st_space[0])) {
2466                end = min(dst->pos + dst->count, (unsigned int)OFFSET(st_space[0]));
2467                getfpxreg(task, dst->pos, end, buf + dst->pos);
2468                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2469                                &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2470                                0, OFFSET(st_space[0]));
2471                if (dst->ret || dst->count == 0)
2472                        return;
2473        }
2474        if (dst->pos < OFFSET(xmm_space[0])) {
2475                pt = task_pt_regs(task);
2476                tos = (task->thread.fsr >> 11) & 7;
2477                end = min(dst->pos + dst->count,
2478                                (unsigned int)OFFSET(xmm_space[0]));
2479                start = (dst->pos - OFFSET(st_space[0])) / 16;
2480                end = (end - OFFSET(st_space[0])) / 16;
2481                for (; start < end; start++)
2482                        access_fpreg_ia32(start, buf + 16 * start, pt,
2483                                                info->sw, tos, 0);
2484                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2485                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2486                                buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2487                if (dst->ret || dst->count == 0)
2488                        return;
2489        }
2490        if (dst->pos < OFFSET(padding[0]))
2491                dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2492                                &dst->u.get.kbuf, &dst->u.get.ubuf,
2493                                &info->sw->f16, OFFSET(xmm_space[0]),
2494                                OFFSET(padding[0]));
2495}
2496
2497static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
2498{
2499        struct regset_getset *dst = arg;
2500        struct task_struct *task = dst->target;
2501        char buf[128];
2502        int start, end;
2503
2504        if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2505                return;
2506
2507        if (dst->pos < OFFSET(st_space[0])) {
2508                start = dst->pos;
2509                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2510                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2511                                buf, 0, OFFSET(st_space[0]));
2512                if (dst->ret)
2513                        return;
2514                setfpxreg(task, start, dst->pos, buf + start);
2515                if (dst->count == 0)
2516                        return;
2517        }
2518        if (dst->pos < OFFSET(xmm_space[0])) {
2519                struct pt_regs *pt;
2520                int tos;
2521                pt = task_pt_regs(task);
2522                tos = (task->thread.fsr >> 11) & 7;
2523                start = (dst->pos - OFFSET(st_space[0])) / 16;
2524                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2525                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2526                                buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2527                if (dst->ret)
2528                        return;
2529                end = (dst->pos - OFFSET(st_space[0])) / 16;
2530                for (; start < end; start++)
2531                        access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
2532                                                 tos, 1);
2533                if (dst->count == 0)
2534                        return;
2535        }
2536        if (dst->pos < OFFSET(padding[0]))
2537                dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2538                                &dst->u.set.kbuf, &dst->u.set.ubuf,
2539                                &info->sw->f16, OFFSET(xmm_space[0]),
2540                                 OFFSET(padding[0]));
2541}
2542#undef OFFSET
2543
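    /*
     * Invoke one of the unwinder callbacks above for "target": the current
     * task unwinds its own running frame, while a stopped tracee is
     * unwound from its blocked context.
     */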
2544static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
2545                struct task_struct *target,
2546                const struct user_regset *regset,
2547                unsigned int pos, unsigned int count,
2548                const void *kbuf, const void __user *ubuf)
2549{
2550        struct regset_getset info = { .target = target, .regset = regset,
2551                .pos = pos, .count = count,
2552                .u.set = { .kbuf = kbuf, .ubuf = ubuf },
2553                .ret = 0 };
2554
2555        if (target == current)
2556                unw_init_running(call, &info);
2557        else {
2558                struct unw_frame_info ufi;
2559                memset(&ufi, 0, sizeof(ufi));
2560                unw_init_from_blocked_task(&ufi, target);
2561                (*call)(&ufi, &info);
2562        }
2563
2564        return info.ret;
2565}
2566
2567static int ia32_fpregs_get(struct task_struct *target,
2568                const struct user_regset *regset,
2569                unsigned int pos, unsigned int count,
2570                void *kbuf, void __user *ubuf)
2571{
2572        return do_regset_call(do_fpregs_get, target, regset, pos, count,
2573                kbuf, ubuf);
2574}
2575
2576static int ia32_fpregs_set(struct task_struct *target,
2577                const struct user_regset *regset,
2578                unsigned int pos, unsigned int count,
2579                const void *kbuf, const void __user *ubuf)
2580{
2581        return do_regset_call(do_fpregs_set, target, regset, pos, count,
2582                kbuf, ubuf);
2583}
2584
2585static int ia32_fpxregs_get(struct task_struct *target,
2586                const struct user_regset *regset,
2587                unsigned int pos, unsigned int count,
2588                void *kbuf, void __user *ubuf)
2589{
2590        return do_regset_call(do_fpxregs_get, target, regset, pos, count,
2591                kbuf, ubuf);
2592}
2593
2594static int ia32_fpxregs_set(struct task_struct *target,
2595                const struct user_regset *regset,
2596                unsigned int pos, unsigned int count,
2597                const void *kbuf, const void __user *ubuf)
2598{
2599        return do_regset_call(do_fpxregs_set, target, regset, pos, count,
2600                kbuf, ubuf);
2601}
2602
2603static int ia32_genregs_get(struct task_struct *target,
2604                const struct user_regset *regset,
2605                unsigned int pos, unsigned int count,
2606                void *kbuf, void __user *ubuf)
2607{
2608        if (kbuf) {
2609                u32 *kp = kbuf;
2610                while (count > 0) {
2611                        *kp++ = getreg(target, pos);
2612                        pos += 4;
2613                        count -= 4;
2614                }
2615        } else {
2616                u32 __user *up = ubuf;
2617                while (count > 0) {
2618                        if (__put_user(getreg(target, pos), up++))
2619                                return -EFAULT;
2620                        pos += 4;
2621                        count -= 4;
2622                }
2623        }
2624        return 0;
2625}
2626
2627static int ia32_genregs_set(struct task_struct *target,
2628                const struct user_regset *regset,
2629                unsigned int pos, unsigned int count,
2630                const void *kbuf, const void __user *ubuf)
2631{
2632        int ret = 0;
2633
2634        if (kbuf) {
2635                const u32 *kp = kbuf;
2636                while (!ret && count > 0) {
2637                        putreg(target, pos, *kp++);
2638                        pos += 4;
2639                        count -= 4;
2640                }
2641        } else {
2642                const u32 __user *up = ubuf;
2643                u32 val;
2644                while (!ret && count > 0) {
2645                        ret = __get_user(val, up++);
2646                        if (!ret)
2647                                putreg(target, pos, val);
2648                        pos += 4;
2649                        count -= 4;
2650                }
2651        }
2652        return ret;
2653}
2654
2655static int ia32_tls_active(struct task_struct *target,
2656                const struct user_regset *regset)
2657{
2658        struct thread_struct *t = &target->thread;
2659        int n = GDT_ENTRY_TLS_ENTRIES;
2660        while (n > 0 && desc_empty(&t->tls_array[n - 1]))
2661                --n;
2662        return n;
2663}
2664
2665static int ia32_tls_get(struct task_struct *target,
2666                const struct user_regset *regset, unsigned int pos,
2667                unsigned int count, void *kbuf, void __user *ubuf)
2668{
2669        const struct desc_struct *tls;
2670
2671        if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2672                        (pos % sizeof(struct ia32_user_desc)) != 0 ||
2673                        (count % sizeof(struct ia32_user_desc)) != 0)
2674                return -EINVAL;
2675
2676        pos /= sizeof(struct ia32_user_desc);
2677        count /= sizeof(struct ia32_user_desc);
2678
2679        tls = &target->thread.tls_array[pos];
2680
2681        if (kbuf) {
2682                struct ia32_user_desc *info = kbuf;
2683                while (count-- > 0)
2684                        fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
2685                                        tls++);
2686        } else {
2687                struct ia32_user_desc __user *u_info = ubuf;
2688                while (count-- > 0) {
2689                        struct ia32_user_desc info;
2690                        fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
2691                        if (__copy_to_user(u_info++, &info, sizeof(info)))
2692                                return -EFAULT;
2693                }
2694        }
2695
2696        return 0;
2697}
2698
2699static int ia32_tls_set(struct task_struct *target,
2700                const struct user_regset *regset, unsigned int pos,
2701                unsigned int count, const void *kbuf, const void __user *ubuf)
2702{
2703        struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
2704        const struct ia32_user_desc *info;
2705
2706        if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2707                        (pos % sizeof(struct ia32_user_desc)) != 0 ||
2708                        (count % sizeof(struct ia32_user_desc)) != 0)
2709                return -EINVAL;
2710
2711        if (kbuf)
2712                info = kbuf;
2713        else if (__copy_from_user(infobuf, ubuf, count))
2714                return -EFAULT;
2715        else
2716                info = infobuf;
2717
2718        set_tls_desc(target,
2719                GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
2720                info, count / sizeof(struct ia32_user_desc));
2721
2722        return 0;
2723}
2724
2725/*
2726 * This should match arch/i386/kernel/ptrace.c:native_regsets.
2727 * XXX ioperm? vm86?
2728 */
2729static const struct user_regset ia32_regsets[] = {
2730        {
2731                .core_note_type = NT_PRSTATUS,
2732                .n = sizeof(struct user_regs_struct32)/4,
2733                .size = 4, .align = 4,
2734                .get = ia32_genregs_get, .set = ia32_genregs_set
2735        },
2736        {
2737                .core_note_type = NT_PRFPREG,
2738                .n = sizeof(struct ia32_user_i387_struct) / 4,
2739                .size = 4, .align = 4,
2740                .get = ia32_fpregs_get, .set = ia32_fpregs_set
2741        },
2742        {
2743                .core_note_type = NT_PRXFPREG,
2744                .n = sizeof(struct ia32_user_fxsr_struct) / 4,
2745                .size = 4, .align = 4,
2746                .get = ia32_fpxregs_get, .set = ia32_fpxregs_set
2747        },
2748        {
2749                .core_note_type = NT_386_TLS,
2750                .n = GDT_ENTRY_TLS_ENTRIES,
2751                .bias = GDT_ENTRY_TLS_MIN,
2752                .size = sizeof(struct ia32_user_desc),
2753                .align = sizeof(struct ia32_user_desc),
2754                .active = ia32_tls_active,
2755                .get = ia32_tls_get, .set = ia32_tls_set,
2756        },
2757};
2758
2759const struct user_regset_view user_ia32_view = {
2760        .name = "i386", .e_machine = EM_386,
2761        .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
2762};
2763
2764long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 
2765                        __u32 len_low, __u32 len_high, int advice)
2766{ 
2767        return sys_fadvise64_64(fd,
2768                               (((u64)offset_high)<<32) | offset_low,
2769                               (((u64)len_high)<<32) | len_low,
2770                               advice); 
2771} 
2772
2773#ifdef  NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */
2774
2775asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
2776{
2777        uid_t sruid, seuid;
2778
2779        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2780        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2781        return sys_setreuid(sruid, seuid);
2782}
2783
2784asmlinkage long
2785sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
2786                compat_uid_t suid)
2787{
2788        uid_t sruid, seuid, ssuid;
2789
2790        sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2791        seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2792        ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
2793        return sys_setresuid(sruid, seuid, ssuid);
2794}
2795
2796asmlinkage long
2797sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
2798{
2799        gid_t srgid, segid;
2800
2801        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2802        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2803        return sys_setregid(srgid, segid);
2804}
2805
2806asmlinkage long
2807sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
2808                compat_gid_t sgid)
2809{
2810        gid_t srgid, segid, ssgid;
2811
2812        srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2813        segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2814        ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
2815        return sys_setresgid(srgid, segid, ssgid);
2816}
2817#endif /* NOTYET */
2818