linux/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
/*
 * Blackfin CPLB exception handling for when the MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

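/* Per-CPU statistics: CPLB miss, protection-fault and flush counts. */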
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
        int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
        return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
        if (status & FAULT_USERSUPV)
                return !!(data & CPLB_SUPV_WR);
        else
                return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
static int evict_one_icplb(unsigned int cpu)
{
        int i;
        for (i = first_switched_icplb; i < MAX_CPLBS; i++)
                if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
                        return i;
        i = first_switched_icplb + icplb_rr_index[cpu];
        if (i >= MAX_CPLBS) {
                i -= MAX_CPLBS - first_switched_icplb;
                icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
        }
        icplb_rr_index[cpu]++;
        return i;
}

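/*
 * Find a DCPLB entry to be evicted and return its index.
 */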
static int evict_one_dcplb(unsigned int cpu)
{
        int i;
        for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
                if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
                        return i;
        i = first_switched_dcplb + dcplb_rr_index[cpu];
        if (i >= MAX_CPLBS) {
                i -= MAX_CPLBS - first_switched_dcplb;
                dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
        }
        dcplb_rr_index[cpu]++;
        return i;
}

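/*
 * Handle a DCPLB miss: build a CPLB entry for the faulting data address,
 * pick a switched slot to evict and install the new entry.  Returns 0 on
 * success or CPLB_PROT_VIOL if the access should not be allowed.
 */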
static noinline int dcplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        unsigned long *mask;
        int idx;
        unsigned long d_data;

        nr_dcplb_miss[cpu]++;

        d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
        if (bfin_addr_dcacheable(addr)) {
                d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
        }
#endif

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else if (addr >= physical_mem_end) {
                if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~0x3fffff;
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_4MB;
                } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                d_data |= CPLB_USER_RD | CPLB_USER_WR;
        } else {
                mask = current_rwx_mask[cpu];
                if (mask) {
                        int page = addr >> PAGE_SHIFT;
                        int idx = page >> 5;
                        int bit = 1 << (page & 31);

                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_RD;

                        mask += page_mask_nelts;
                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_WR;
                }
        }
        idx = evict_one_dcplb(cpu);

        addr &= PAGE_MASK;
        dcplb_tbl[cpu][idx].addr = addr;
        dcplb_tbl[cpu][idx].data = d_data;

        _disable_dcplb();
        bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
        bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
        _enable_dcplb();

        return 0;
}

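/*
 * Handle an ICPLB miss, the instruction-side counterpart of dcplb_miss():
 * build a CPLB entry for the faulting instruction address and install it
 * in a free or evicted switched ICPLB slot.
 */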
static noinline int icplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
        int status = bfin_read_ICPLB_STATUS();
        int idx;
        unsigned long i_data;

        nr_icplb_miss[cpu]++;

        /* If inside the uncached DMA region, fault.  */
        if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
                return CPLB_PROT_VIOL;

        if (status & FAULT_USERSUPV)
                nr_icplb_supv_miss[cpu]++;

        /*
         * First, try to find a CPLB that matches this address.  If we
         * find one, then the fact that we're in the miss handler means
         * that the instruction crosses a page boundary.
         */
        for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
                if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
                        unsigned long this_addr = icplb_tbl[cpu][idx].addr;
                        if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
                                addr += PAGE_SIZE;
                                break;
                        }
                }
        }

        i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
        /*
         * Normal RAM, and possibly the reserved memory area, are
         * cacheable.
         */
        if (addr < _ramend ||
            (addr < physical_mem_end && reserved_mem_icache_on))
                i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                i_data = L2_IMEMORY;
        } else if (addr >= physical_mem_end) {
                if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        i_data &= ~PAGE_SIZE_4KB;
                        i_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                i_data |= CPLB_USER_RD;
        } else {
                /*
                 * Two cases to distinguish - a supervisor access must
                 * necessarily be for a module page; we grant it
                 * unconditionally (could do better here in the future).
                 * Otherwise, check the x bitmap of the current process.
                 */
                if (!(status & FAULT_USERSUPV)) {
                        unsigned long *mask = current_rwx_mask[cpu];

                        if (mask) {
                                int page = addr >> PAGE_SHIFT;
                                int idx = page >> 5;
                                int bit = 1 << (page & 31);

                                mask += 2 * page_mask_nelts;
                                if (mask[idx] & bit)
                                        i_data |= CPLB_USER_RD;
                        }
                }
        }
        idx = evict_one_icplb(cpu);
        addr &= PAGE_MASK;
        icplb_tbl[cpu][idx].addr = addr;
        icplb_tbl[cpu][idx].data = i_data;

        _disable_icplb();
        bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
        bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
        _enable_icplb();

        return 0;
}

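/*
 * Handle a DCPLB protection violation.  The only case fixed up here is
 * the first write to a clean, write-back page that is actually writable:
 * mark the CPLB entry dirty and retry.  Anything else is a genuine fault.
 */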
static noinline int dcplb_protection_fault(unsigned int cpu)
{
        int status = bfin_read_DCPLB_STATUS();

        nr_dcplb_prot[cpu]++;

        if (status & FAULT_RW) {
                int idx = faulting_cplb_index(status);
                unsigned long data = dcplb_tbl[cpu][idx].data;
                if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
                    write_permitted(status, data)) {
                        data |= CPLB_DIRTY;
                        dcplb_tbl[cpu][idx].data = data;
                        bfin_write32(DCPLB_DATA0 + idx * 4, data);
                        return 0;
                }
        }
        return CPLB_PROT_VIOL;
}

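/*
 * Dispatch a CPLB exception based on the cause field in the low six bits
 * of seqstat: 0x23 is a DCPLB protection violation, 0x26 a DCPLB miss and
 * 0x2C an ICPLB miss.  Returns nonzero if the fault could not be handled.
 */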
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
        int cause = seqstat & 0x3f;
        unsigned int cpu = raw_smp_processor_id();
        switch (cause) {
        case 0x23:
                return dcplb_protection_fault(cpu);
        case 0x2C:
                return icplb_miss(cpu);
        case 0x26:
                return dcplb_miss(cpu);
        default:
                return 1;
        }
}

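/*
 * Invalidate all switched ICPLB and DCPLB entries on this CPU, both in
 * the shadow tables and in the hardware registers.
 */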
void flush_switched_cplbs(unsigned int cpu)
{
        int i;
        unsigned long flags;

        nr_cplb_flush[cpu]++;

        local_irq_save_hw(flags);
        _disable_icplb();
        for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
                icplb_tbl[cpu][i].data = 0;
                bfin_write32(ICPLB_DATA0 + i * 4, 0);
        }
        _enable_icplb();

        _disable_dcplb();
        for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
                dcplb_tbl[cpu][i].data = 0;
                bfin_write32(DCPLB_DATA0 + i * 4, 0);
        }
        _enable_dcplb();
        local_irq_restore_hw(flags);
}

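/*
 * Point the mask DCPLB entries at the page-permission bitmap of the
 * current process and remember it in current_rwx_mask[].  A NULL mask
 * simply clears the recorded mask.
 */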
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
        int i;
        unsigned long addr = (unsigned long)masks;
        unsigned long d_data;
        unsigned long flags;

        if (!masks) {
                current_rwx_mask[cpu] = masks;
                return;
        }

        local_irq_save_hw(flags);
        current_rwx_mask[cpu] = masks;

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else {
                d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
                d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
        }

        _disable_dcplb();
        for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
                dcplb_tbl[cpu][i].addr = addr;
                dcplb_tbl[cpu][i].data = d_data;
                bfin_write32(DCPLB_DATA0 + i * 4, d_data);
                bfin_write32(DCPLB_ADDR0 + i * 4, addr);
                addr += PAGE_SIZE;
        }
        _enable_dcplb();
        local_irq_restore_hw(flags);
}