linux/arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

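/*
 * A host_vm_change accumulates the host mmap/munmap/mprotect operations
 * that a page table walk generates, so adjacent compatible requests can
 * be merged and issued together rather than one host call per page.
 * "force" makes mappings be redone even where the page tables claim the
 * host is already up to date.
 */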
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } }, \
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })

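/*
 * Flush queued operations out to the host.  "finished" is passed down
 * so the lower layers know whether more calls are coming and can batch
 * accordingly; the final call must have it set.
 */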
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

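/*
 * Queue a mapping of the host file backing "phys" at virtual address
 * "virt".  A request that simply extends the previous MMAP op in the
 * queue is merged into it; when the queue is full it is flushed with
 * do_ops() before the new op is recorded.
 */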
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
			   } });
	return ret;
}

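/* As add_mmap(), but queues an unmap of [addr, addr + len). */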
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}

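/* As add_mmap(), but queues a protection change to "prot". */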
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

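/*
 * Step n forward to the next "inc" boundary (inc must be a power of
 * two).  An already aligned n advances by a full inc, which
 * flush_tlb_kernel_range_common() relies on when skipping over an
 * absent page table entry.
 */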
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

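/*
 * Walk the ptes under one pmd entry.  Host access rights are derived
 * from the pte: a page that is not young is mapped with no access, and
 * a clean page without write access, so that the first read or write
 * faults and lets the accessed/dirty bits be set.  New pages become
 * mmaps or munmaps, protection changes become mprotects, and each pte
 * is marked up to date once its operation is queued.
 */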
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else ret = add_munmap(addr, PAGE_SIZE, hvc);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

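/*
 * The pmd and pud levels look the same: a non-present entry gets its
 * whole range munmapped (when forced or newly cleared), anything else
 * is recursed into.
 */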
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

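/*
 * Synchronize the host mappings of "mm" with the page tables over
 * [start_addr, end_addr).  Failure is fatal for the process, since the
 * host's view of its address space can no longer be trusted.
 */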
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

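/*
 * Kernel mappings live in init_mm and apply to UML's own host address
 * space, so they are fixed up immediately with os_unmap_memory() and
 * map_memory() instead of being queued.  Returns nonzero if anything
 * was changed.
 */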
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

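/*
 * Single-page fast path: look up the pte by hand and issue the one host
 * operation it calls for, instead of running the full walker.
 */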
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

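/*
 * Out-of-line wrappers around the page table accessors, for callers
 * that cannot use the pgtable macros directly (e.g. code built against
 * userspace headers).
 */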
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

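/*
 * Without /proc/mm, the stub pages at STUB_START must stay mapped, so
 * flushes are clamped below them.
 */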
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > STUB_START))
		end_addr = STUB_START;

	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : STUB_START;
	fix_range(mm, 0, end, 0);
}

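/* Force every VMA of the current process to be remapped on the host. */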
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}