linux/arch/s390/mm/pgalloc.c
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

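/*
 * KVM guests need page tables with PGSTEs (page status table entries).
 * The vm.allocate_pgste sysctl lets user space request that all page
 * tables of a process are allocated as full 4K pages with PGSTEs
 * instead of 2K fragments, e.g. "echo 1 > /proc/sys/vm/allocate_pgste"
 * before starting the guest. Writes are clamped to the range 0..1 via
 * extra1/extra2.
 */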
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
        {
                .procname       = "allocate_pgste",
                .data           = &page_table_allocate_pgste,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO | S_IWUSR,
                /*
                 * proc_dointvec ignores extra1/extra2; use the _minmax
                 * variant so the 0..1 bounds are actually enforced.
                 */
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &page_table_allocate_pgste_min,
                .extra2         = &page_table_allocate_pgste_max,
        },
        { }
};

static struct ctl_table page_table_sysctl_dir[] = {
        {
                .procname       = "vm",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = page_table_sysctl,
        },
        { }
};

static int __init page_table_register_sysctl(void)
{
        return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

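/*
 * Region and segment (CRST) tables have 2048 entries of 8 bytes each,
 * i.e. 16K, hence the order-2 allocation. Returning page_to_phys() as
 * a virtual address works because the kernel runs with a 1:1 mapping
 * of physical memory.
 */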
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, 2);
}

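/*
 * Called via on_each_cpu() after an upgrade: CPUs currently running
 * with the upgraded mm reload their user ASCE so the new top-level
 * table takes effect, and every CPU flushes its local TLB.
 */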
static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        if (current->active_mm == mm) {
                clear_user_asce();
                set_user_asce(mm);
        }
        __tlb_flush_local();
}

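/*
 * Grow the address space from 3 to 4 page table levels: allocate a new
 * region-2 table, let it point to the old top-level table, switch
 * mm->pgd and the ASCE over to the new table, then broadcast the
 * change to all CPUs.
 */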
int crst_table_upgrade(struct mm_struct *mm)
{
        unsigned long *table, *pgd;

        /* upgrade should only happen from 3 to 4 levels */
        BUG_ON(mm->context.asce_limit != (1UL << 42));

        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;

        spin_lock_bh(&mm->page_table_lock);
        pgd = (unsigned long *) mm->pgd;
        crst_table_init(table, _REGION2_ENTRY_EMPTY);
        pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
        mm->pgd = (pgd_t *) table;
        mm->context.asce_limit = 1UL << 53;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                           _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
        mm->task_size = mm->context.asce_limit;
        spin_unlock_bh(&mm->page_table_lock);

        on_each_cpu(__crst_table_upgrade, mm, 0);
        return 0;
}

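/*
 * Shrink a compat address space from 3 to 2 page table levels: the new
 * mm->pgd is the segment table that the first entry of the old
 * region-3 table points to; the old table is freed afterwards.
 */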
void crst_table_downgrade(struct mm_struct *mm)
{
        pgd_t *pgd;

        /* downgrade should only happen from 3 to 2 levels (compat only) */
        BUG_ON(mm->context.asce_limit != (1UL << 42));

        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }

        pgd = mm->pgd;
        mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
        mm->context.asce_limit = 1UL << 31;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                           _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
        mm->task_size = mm->context.asce_limit;
        crst_table_free(mm, (unsigned long *) pgd);

        if (current->active_mm == mm)
                set_user_asce(mm);
}

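/*
 * Atomically XOR bits into *v and return the new value. Used to flip
 * the bits in page->_mapcount that track the state of the two 2K page
 * table fragments within a 4K page.
 */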
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

#ifdef CONFIG_PGSTE

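/*
 * Allocate a full 4K page table with PGSTEs: the lower 2K hold the
 * page table entries, the upper 2K the page status table entries.
 */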
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
        struct page *page;
        unsigned long *table;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (page) {
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
                clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        }
        return page;
}

void page_table_free_pgste(struct page *page)
{
        __free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
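/*
 * A 4K page can hold two 2K page tables. Bits 0-1 of page->_mapcount
 * mark a fragment as allocated, bits 4-5 mark it as pending an RCU
 * grace period (see page_table_free_rcu). Pages with a free fragment
 * are kept on mm->context.pgtable_list.
 */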
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        unsigned long *table;
        struct page *page;
        unsigned int mask, bit;

        /* Try to get a fragment of a 4K page as a 2K page table */
        if (!mm_alloc_pgste(mm)) {
                table = NULL;
                spin_lock_bh(&mm->context.pgtable_lock);
                if (!list_empty(&mm->context.pgtable_list)) {
                        page = list_first_entry(&mm->context.pgtable_list,
                                                struct page, lru);
                        mask = atomic_read(&page->_mapcount);
                        mask = (mask | (mask >> 4)) & 3;
                        if (mask != 3) {
                                table = (unsigned long *) page_to_phys(page);
                                bit = mask & 1;         /* =1 -> second 2K */
                                if (bit)
                                        table += PTRS_PER_PTE;
                                atomic_xor_bits(&page->_mapcount, 1U << bit);
                                list_del(&page->lru);
                        }
                }
                spin_unlock_bh(&mm->context.pgtable_lock);
                if (table)
                        return table;
        }
        /* Allocate a fresh page */
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        /* Initialize page table */
        table = (unsigned long *) page_to_phys(page);
        if (mm_alloc_pgste(mm)) {
                /* Return 4K page table with PGSTEs */
                atomic_set(&page->_mapcount, 3);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
                clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        } else {
                /* Return the first 2K fragment of the page */
                atomic_set(&page->_mapcount, 1);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE);
                spin_lock_bh(&mm->context.pgtable_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
                spin_unlock_bh(&mm->context.pgtable_lock);
        }
        return table;
}

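/*
 * Immediate (non-RCU) free: clear the fragment's allocation bit; the
 * 4K page itself is only released once no fragment is in use or still
 * pending an RCU grace period.
 */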
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (!mm_alloc_pgste(mm)) {
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                spin_lock_bh(&mm->context.pgtable_lock);
                mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
                spin_unlock_bh(&mm->context.pgtable_lock);
                if (mask != 0)
                        return;
        }

        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        __free_page(page);
}

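/*
 * Deferred free through the mmu_gather batch: mark the fragment as
 * pending and encode its index in the low bits of the table address so
 * that __tlb_remove_table() knows what to free after the grace period.
 */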
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                         unsigned long vmaddr)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (mm_alloc_pgste(mm)) {
                gmap_unlink(mm, table, vmaddr);
                table = (unsigned long *) (__pa(table) | 3);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
        spin_lock_bh(&mm->context.pgtable_lock);
        mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
        if (mask & 3)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        else
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.pgtable_lock);
        table = (unsigned long *) (__pa(table) | (1U << bit));
        tlb_remove_table(tlb, table);
}

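/*
 * Final free after the grace period. The two low bits of the pointer
 * say what is being freed: 0 = CRST (pmd/pud) table, 1/2 = lower/upper
 * 2K fragment, 3 = 4K page table with PGSTEs.
 */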
static void __tlb_remove_table(void *_table)
{
        unsigned int mask = (unsigned long) _table & 3;
        void *table = (void *)((unsigned long) _table ^ mask);
        struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

        switch (mask) {
        case 0:         /* pmd or pud */
                free_pages((unsigned long) table, 2);
                break;
        case 1:         /* lower 2K of a 4K page table */
        case 2:         /* higher 2K of a 4K page table */
                if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
                break;
        }
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

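/*
 * Hand a filled batch over to RCU; the tables it contains are freed by
 * tlb_remove_table_rcu() after a grace period.
 */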
void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

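/*
 * Queue a table for deferred freeing. If no batch page can be
 * allocated, synchronize with the other CPUs via an IPI and free the
 * table directly instead.
 */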
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_flush_mmu(tlb);
}