// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

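/*
 * Return the raw PTE value if the kernel virtual address @addr is mapped
 * and present, 0 otherwise. (pte_t is a plain integer type on nds32, so
 * the PTE fits in the int return value.)
 */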
int va_kernel_present(unsigned long addr)
{
        pmd_t *pmd;
        pte_t *ptep, pte;

        pmd = pmd_off_k(addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

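/*
 * Walk the page tables of @mm and return the PTE value for @addr if it
 * is mapped and present, 0 otherwise.
 */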
pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;
}

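/*
 * Test whether @addr was readable in the mode the CPU was in when it
 * produced @regs: user addresses need a present, readable PTE; kernel
 * mode only needs the mapping to be present.
 */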
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /*
                 * Superuser mode is always readable, so we only need
                 * to check whether the mapping is present.
                 */
                return !!va_kernel_present(addr);
        }
        return ret;
}

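/*
 * Test whether @addr was writable in the mode the CPU was in when it
 * produced @regs: user mappings need write permission, kernel mappings
 * need kernel write permission.
 */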
int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * Whole-cache operations
 */
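/*
 * Invalidate the whole L1 instruction cache by index. The loop counts
 * down from the total cache size (line_size * ways * sets), four lines
 * per iteration, so the total size is assumed to be a multiple of four
 * cache lines.
 */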
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end = line_size * L1_cache_info[ICACHE].ways *
              L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
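/*
 * Write back the data caches at all levels (L1 and L2). The dummy CCTL
 * index read drains the L1 write-back, the L2_SYNC command confirms it
 * has completed in both the CPU and the L2 controller, and the final
 * command writes back the whole L2 cache by index.
 */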
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;

        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: ensure sections 2 & 3 only execute after the write-back */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: confirm the all-level write-back is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: write back the whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;

        local_irq_save(flags);
#endif
        cpu_dcache_wb_all();
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

/*
 * Page operations
 */
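/*
 * The page helpers below walk one PAGE_SIZE region from the end toward
 * the start, four cache lines per loop iteration, so PAGE_SIZE is
 * assumed to be a multiple of four cache lines.
 */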
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

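/*
 * Write back, then invalidate, one page of the L1 data cache, line by
 * line. On write-through caches the write-back steps are compiled out
 * and only the invalidation remains.
 */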
void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

/*
 * Range operations
 */
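/*
 * The range helpers walk [start, end) one cache line at a time; callers
 * such as cpu_cache_wbinval_range() and the DMA helpers align the
 * boundaries to the cache line size first.
 */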
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

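/*
 * Write back and invalidate an arbitrary virtual range, aligning the
 * boundaries to the D-cache and I-cache line sizes respectively before
 * the per-line operations run.
 */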
void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}

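/*
 * As cpu_cache_wbinval_range(), but for a user range that may be only
 * partially mapped. Ranges larger than eight pages fall back to a
 * whole-cache flush; smaller ranges are handled piecewise, using
 * va_present() to skip pages that have no present PTE.
 */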
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = (end - 1) & PAGE_MASK;

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}

#ifdef CONFIG_CACHE_L2
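/*
 * Issue one L2 CCTL command per cache line over [start, end), followed
 * by an L2_SYNC. The addresses are translated with __pa(), so the range
 * must lie in the kernel's linear mapping.
 */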
static inline void cpu_l2cache_op(unsigned long start, unsigned long end,
                                  unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;

                /* TODO: use PAGE mode to optimize ranges larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & ~(line_size - 1);
                p_end = (p_end + line_size - 1) & ~(line_size - 1);
                /* p_start is already line-aligned here */
                cmd = p_start | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif
/*
 * DMA operations
 */
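/*
 * The DMA helpers keep L1 and L2 coherent around DMA transfers: each one
 * aligns the range to the L1 line size, operates on L1 by virtual
 * address and on L2 by physical address, and finishes with msync. A
 * typical (hypothetical) use is cpu_dma_wb_range(buf, buf + len) before
 * a device reads from buf.
 */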
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

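/*
 * Invalidate a range for DMA. Partial cache lines at an unaligned head
 * or tail are written back and invalidated instead of just invalidated,
 * so unrelated data sharing those lines is not lost.
 */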
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

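/*
 * Disable interrupts and both L1 caches, flush all cached data, then
 * jump to @reset; this does not return.
 */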
void cpu_reset(unsigned long reset)
{
        u32 tmp;

        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

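/*
 * Switch the MMU context: install the new context ID (CID) in TLB_MISC
 * and point the hardware page-table base register (L1_PPTB) at the new
 * pgd.
 */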
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;

        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}