linux/arch/powerpc/mm/mmu_context_hash64.c
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

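/*
 * Allocate a new context id from the IDA.  Ids start at 1 (0 is never
 * handed out) and anything above MAX_USER_CONTEXT is refused.  The IDA
 * may need to grow, in which case the allocation is retried after
 * preloading more memory.  Returns the new id or a negative errno.
 */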
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
EXPORT_SYMBOL_GPL(__init_new_context);

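/*
 * Called when a new mm is created (fork/exec): allocate a context id
 * and set up the per-mm MMU state (slices, subpage protection, and the
 * icswx coprocessor lock when configured).
 */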
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/*
	 * The old code would re-promote on fork, we don't do that when
	 * using slices as it could cause problems promoting slices that
	 * have been forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
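	/*
	 * PTE pages are handed out in PTE_FRAG_NR fragments on 64K-page
	 * kernels; no fragment is associated with this mm yet.
	 */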
	mm->context.pte_frag = NULL;
#endif
	return 0;
}

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

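/*
 * On 64K-page kernels a single page backs up to PTE_FRAG_NR page table
 * fragments.  Drop whatever references this mm still holds on its
 * current fragment page and free the page when the count hits zero.
 */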
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
	if (!count) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

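/*
 * Tear down the per-mm MMU state when the mm goes away: the icswx
 * coprocessor lock, any PTE fragment page, the context id and the
 * subpage protection table.
 */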
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	subpage_prot_free(mm);
	mm->context.id = MMU_NO_CONTEXT;
}