linux/arch/powerpc/mm/slice.c
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

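/*
 * Build the slice mask covering [start, start + len): set a bit for
 * every low and high slice that the range intersects.
 */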
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

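/*
 * Return 1 if [addr, addr + len) lies below the address space limit and
 * does not overlap any existing VMA (including its stack guard gap).
 */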
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}

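/*
 * Build the mask of slices that contain no VMAs at all, scanning the low
 * slices and the high slices up to high_limit.
 */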
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

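/*
 * Return the cached mask of slices currently using the given page size.
 * The cache is kept up to date by slice_convert() and seeded in
 * slice_init_new_context_exec().
 */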
#ifdef CONFIG_PPC_BOOK3S_64
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &mm->context.mask_64k;
#endif
	if (psize == MMU_PAGE_4K)
		return &mm->context.mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &mm->context.mask_16m;
	if (psize == MMU_PAGE_16G)
		return &mm->context.mask_16g;
#endif
	BUG();
}
#elif defined(CONFIG_PPC_8xx)
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
	if (psize == mmu_virtual_psize)
		return &mm->context.mask_base_psize;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_512K)
		return &mm->context.mask_512k;
	if (psize == MMU_PAGE_8M)
		return &mm->context.mask_8m;
#endif
	BUG();
}
#else
#error "Must define the slice masks for page sizes supported by the platform"
#endif

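/*
 * Check whether every slice covering [start, start + len) is set in the
 * 'available' mask.
 */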
static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

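/*
 * Flush and rebolt the SLB on the current CPU if it is running the given
 * mm, so stale segment mappings are dropped. Called via on_each_cpu()
 * after page size conversions; a no-op on non-PPC64 builds.
 */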
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
#endif
}

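/*
 * Convert every slice selected in 'mask' to the new page size: update
 * the packed per-slice psize arrays and the per-size slice mask cache,
 * then flush any coprocessor SLBs referencing this mm.
 */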
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(mm, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

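/*
 * Search upwards from TASK_UNMAPPED_BASE, merging runs of available
 * slices into candidate ranges and asking vm_unmapped_area() for a fit
 * in each one.
 */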
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed maximum address for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

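/*
 * Search downwards from mm->mmap_base in the same fashion, falling back
 * to the bottom-up search if nothing fits.
 */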
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only apply this for
	 * requests whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

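/*
 * Helpers to combine slice masks; the high-slice bitmap operations
 * compile away when SLICE_NUM_HIGH is 0.
 */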
static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

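/*
 * Core slice-aware unmapped area search: find (or create, by converting
 * free slices) a range of slices of the requested page size that can
 * hold 'len' bytes. The search order is spelled out in the strategy
 * comment in the body below.
 */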
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * the slice mask cache to be recalculated because it
		 * should already be initialised beyond the old address
		 * limit.
		 */
		mm->context.slb_addr_limit = high_limit;

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(mm, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}
#endif

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

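/*
 * Return the page size (MMU_PAGE_* index) of the slice containing addr,
 * read from the packed 4-bit per-slice psize arrays.
 */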
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

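/*
 * Reset the slice state of an mm at exec time: restore the default
 * address limit, set every slice to the default page size and seed the
 * slice mask cache to match.
 */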
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
#ifdef CONFIG_PPC64
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
#endif

	mm->context.user_psize = psize;

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm->context.low_slices_psize;
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm->context.high_slices_psize;
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(mm, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

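/*
 * Force the slices covering [start, start + len) to the given page size.
 */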
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size; only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm->context.user_psize;

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}
#endif

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif