/* linux/arch/mn10300/mm/cache-inv-icache.c */
/* Invalidate icache when dcache doesn't need invalidation as it's in
 * write-through mode
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"

/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *                              single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Invalidate the icache for part of a single page, as determined by the
 * virtual addresses given.  The page must be in the paged area.  The dcache is
 * not flushed as the cache must be in write-through mode to get here.
 *
 * If the address turns out not to be mapped (any level of the walk is empty)
 * the function silently does nothing: there can be no stale icache lines for
 * an unmapped page.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush: offset within the page and
	 * byte count (caller guarantees [start, end) lies in one page) */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables
	 *
	 * NOTE(review): this walks current->mm, so it assumes the range is
	 * mapped in the current process's address space — confirm callers
	 * never pass vmalloc/kernel-only addresses here (those would need
	 * init_mm / pgd_offset_k) */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	/* snapshot the PTE value before releasing the temporary mapping so we
	 * can keep examining it safely afterwards */
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* invalidate the icache coverage on that region: locally by physical
	 * address, then broadcast the virtual range to the other CPUs */
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
}
  74
  75/**
  76 * flush_icache_range - Globally flush dcache and invalidate icache for region
  77 * @start: The starting virtual address of the region.
  78 * @end: The ending virtual address of the region.
  79 *
  80 * This is used by the kernel to globally flush some code it has just written
  81 * from the dcache back to RAM and then to globally invalidate the icache over
  82 * that region so that that code can be run on all CPUs in the system.
  83 */
  84void flush_icache_range(unsigned long start, unsigned long end)
  85{
  86        unsigned long start_page, end_page;
  87        unsigned long flags;
  88
  89        flags = smp_lock_cache();
  90
  91        if (end > 0x80000000UL) {
  92                /* addresses above 0xa0000000 do not go through the cache */
  93                if (end > 0xa0000000UL) {
  94                        end = 0xa0000000UL;
  95                        if (start >= end)
  96                                goto done;
  97                }
  98
  99                /* kernel addresses between 0x80000000 and 0x9fffffff do not
 100                 * require page tables, so we just map such addresses
 101                 * directly */
 102                start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
 103                mn10300_icache_inv_range(start_page, end);
 104                smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
 105                if (start_page == start)
 106                        goto done;
 107                end = start_page;
 108        }
 109
 110        start_page = start & PAGE_MASK;
 111        end_page = (end - 1) & PAGE_MASK;
 112
 113        if (start_page == end_page) {
 114                /* the first and last bytes are on the same page */
 115                flush_icache_page_range(start, end);
 116        } else if (start_page + 1 == end_page) {
 117                /* split over two virtually contiguous pages */
 118                flush_icache_page_range(start, end_page);
 119                flush_icache_page_range(end_page, end);
 120        } else {
 121                /* more than 2 pages; just flush the entire cache */
 122                mn10300_local_icache_inv();
 123                smp_cache_call(SMP_ICACHE_INV, 0, 0);
 124        }
 125
 126done:
 127        smp_unlock_cache(flags);
 128}
 129EXPORT_SYMBOL(flush_icache_range);
 130