// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

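/*
 * Change the protection on the ptes mapped by @pmd in [@addr, @end) to
 * @newprot and return the number of entries updated.  With @prot_numa the
 * ptes are instead marked for NUMA hinting faults, skipping zero/KSM pages
 * and, for single-threaded private VMAs, pages already on the local node.
 */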
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;

	/*
	 * Can be called with only the mmap_sem for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}

			if (is_write_device_private_entry(entry)) {
				pte_t newpte;

				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

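/*
 * Walk the pmds in [@addr, @end): huge pmds are either split or changed in
 * place via change_huge_pmd(), everything else falls through to
 * change_pte_range().  The mmu notifiers are only invoked once a populated
 * pmd has been seen.
 */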
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

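/* Walk the puds in [@addr, @end) and descend into change_pmd_range(). */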
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

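/* Walk the p4ds in [@addr, @end) and descend into change_pud_range(). */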
static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

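/*
 * Walk the page tables of @vma over [@addr, @end), applying @newprot and
 * flushing the TLB only if any entries were actually changed.  Returns the
 * number of pages updated.
 */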
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

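/*
 * Dispatcher: hugetlb VMAs go through hugetlb_change_protection(), all
 * other VMAs through the generic page table walk above.
 */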
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

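/*
 * Apply @newflags to the [@start, @end) slice of @vma: charge commit for a
 * newly writable private mapping if needed, merge or split the vma so the
 * range is covered exactly, then update vm_flags/vm_page_prot and rewrite
 * the page table protections.  Called with mmap_sem held for writing.
 */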
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * Core of mprotect(): walk the VMAs covering [start, start + len), adjust
 * the protection of each via mprotect_fixup(), and stop at the first hole
 * or error.
 *
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					ARCH_VM_PKEY_FLAGS;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY* in place of VM_* */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

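/* Legacy mprotect(): no protection key, so pkey is passed as -1. */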
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

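/*
 * Illustrative userspace flow for the protection-key syscalls below (a
 * sketch, not part of this file; assumes the glibc wrappers and the PKEY_*
 * access bits from <sys/mman.h>):
 *
 *	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	if (pkey >= 0) {
 *		pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *		...
 *		pkey_free(pkey);
 *	}
 *
 * The key tags the mapping; the calling thread's access rights for that
 * key can later be changed from userspace (e.g. WRPKRU on x86) without
 * another syscall.
 */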
SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */