linux/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>           /* for totalram_pages */
#include <linux/slab.h>         /* for kmem_cache */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0            - 0x7fffffff            DDR RAM (2GB)
 * 0x80000000   - 0xffffffff            MMIO    (2GB)
 * 0x100000000  - 0x3fffffffffff        DDR RAM (64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address range 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * If anything is unclear, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB    (SZ_2G / PAGE_SIZE)
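
/*
 * For reference only: with the usual 4 KiB PAGE_SIZE this threshold is
 * SZ_2G / PAGE_SIZE = 0x80000000 / 0x1000 = 524288 pages.  A system whose
 * totalram_pages exceeds it has RAM living above the 4GB boundary (see the
 * layout above), so 32-bit reachable memory must be requested explicitly,
 * e.g. with GFP_DMA32 in alloc_page_table() below.
 */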

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
                                unsigned int end_isp_virt);

static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
        unsigned int *pt_virt = phys_to_virt(pt);
        return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
                            unsigned int idx, unsigned int pte)
{
        unsigned int *pt_virt = phys_to_virt(pt);
        *(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
        return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
                                     unsigned int pte)
{
        return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
                                            phys_addr_t phys)
{
        unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
        return (unsigned int) (pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * allocate an uncacheable page table.
 * return its physical address.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
        int i;
        phys_addr_t page;
        void *virt;

        /* page table lock may be needed here */
        /*
         * The slab allocator (kmem_cache and kmalloc family) doesn't handle
         * the GFP_DMA32 flag, so we have to use the buddy allocator.
         */
        if (totalram_pages > (unsigned long)NR_PAGES_2GB)
                virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
        else
                virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
        if (!virt)
                return (phys_addr_t)NULL_PAGE;

        /*
         * we need an uncacheable page table.
         */
#ifdef  CONFIG_X86
        set_memory_uc((unsigned long)virt, 1);
#endif

        page = virt_to_phys(virt);

        for (i = 0; i < 1024; i++) {
                /* NEED CHECK */
                atomisp_set_pte(page, i, mmu->driver->null_pte);
        }

        return page;
}

static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
        void *virt;
        page &= ISP_PAGE_MASK;
        /*
         * reset the page to write-back before freeing it
         */
        virt = phys_to_virt(page);

#ifdef  CONFIG_X86
        set_memory_wb((unsigned long)virt, 1);
#endif

        kmem_cache_free(mmu->tbl_cache, virt);
}

static void mmu_remap_error(struct isp_mmu *mmu,
                            phys_addr_t l1_pt, unsigned int l1_idx,
                            phys_addr_t l2_pt, unsigned int l2_idx,
                            unsigned int isp_virt, phys_addr_t old_phys,
                            phys_addr_t new_phys)
{
        dev_err(atomisp_dev, "address remap:\n\n"
                     "\tL1 PT: virt = %p, phys = 0x%llx, "
                     "idx = %d\n"
                     "\tL2 PT: virt = %p, phys = 0x%llx, "
                     "idx = %d\n"
                     "\told: isp_virt = 0x%x, phys = 0x%llx\n"
                     "\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
                     isp_pt_phys_to_virt(l1_pt),
                     (u64)l1_pt, l1_idx,
                     isp_pt_phys_to_virt(l2_pt),
                     (u64)l2_pt, l2_idx, isp_virt,
                     (u64)old_phys, isp_virt,
                     (u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
                                   phys_addr_t l1_pt, unsigned int l1_idx,
                                   phys_addr_t l2_pt, unsigned int l2_idx,
                                   unsigned int isp_virt, unsigned int pte)
{
        dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
                     "\tL1 PT: virt = %p, phys = 0x%llx, "
                     "idx = %d\n"
                     "\tL2 PT: virt = %p, phys = 0x%llx, "
                     "idx = %d\n"
                     "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
                     isp_pt_phys_to_virt(l1_pt),
                     (u64)l1_pt, l1_idx,
                     isp_pt_phys_to_virt(l2_pt),
                     (u64)l2_pt, l2_idx, isp_virt,
                     pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
                                   phys_addr_t l1_pt, unsigned int l1_idx,
                                   unsigned int isp_virt, unsigned int pte)
{
        dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
                     "\tL1 PT: virt = %p, phys = 0x%llx, "
                     "idx = %d\n"
                     "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
                     isp_pt_phys_to_virt(l1_pt),
                     (u64)l1_pt, l1_idx, (unsigned int)isp_virt,
                     pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
        dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
                     "L1PT = 0x%x\n", (unsigned int)pte);
}

/*
 * Update L2 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
                      unsigned int l1_idx, phys_addr_t l2_pt,
                      unsigned int start, unsigned int end, phys_addr_t phys)
{
        unsigned int ptr;
        unsigned int idx;
        unsigned int pte;

        l2_pt &= ISP_PAGE_MASK;

        start = start & ISP_PAGE_MASK;
        end = ISP_PAGE_ALIGN(end);
        phys &= ISP_PAGE_MASK;

        ptr = start;
        do {
                idx = ISP_PTR_TO_L2_IDX(ptr);

                pte = atomisp_get_pte(l2_pt, idx);

                if (ISP_PTE_VALID(mmu, pte)) {
                        mmu_remap_error(mmu, l1_pt, l1_idx,
                                          l2_pt, idx, ptr, pte, phys);

                        /* free all mapped pages */
                        free_mmu_map(mmu, start, ptr);

                        return -EINVAL;
                }

                pte = isp_pgaddr_to_pte_valid(mmu, phys);

                atomisp_set_pte(l2_pt, idx, pte);
                mmu->l2_pgt_refcount[l1_idx]++;
                ptr += (1U << ISP_L2PT_OFFSET);
                phys += (1U << ISP_L2PT_OFFSET);
        } while (ptr < end && idx < ISP_L2PT_PTES - 1);

        return 0;
}

/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
                      unsigned int start, unsigned int end,
                      phys_addr_t phys)
{
        phys_addr_t l2_pt;
        unsigned int ptr, l1_aligned;
        unsigned int idx;
        unsigned int l2_pte;
        int ret;

        l1_pt &= ISP_PAGE_MASK;

        start = start & ISP_PAGE_MASK;
        end = ISP_PAGE_ALIGN(end);
        phys &= ISP_PAGE_MASK;

        ptr = start;
        do {
                idx = ISP_PTR_TO_L1_IDX(ptr);

                l2_pte = atomisp_get_pte(l1_pt, idx);

                if (!ISP_PTE_VALID(mmu, l2_pte)) {
                        l2_pt = alloc_page_table(mmu);
                        if (l2_pt == NULL_PAGE) {
                                dev_err(atomisp_dev,
                                             "alloc page table fail.\n");

                                /* free all mapped pages */
                                free_mmu_map(mmu, start, ptr);

                                return -ENOMEM;
                        }

                        l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

                        atomisp_set_pte(l1_pt, idx, l2_pte);
                        mmu->l2_pgt_refcount[idx] = 0;
                }

                l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

                l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

                if (l1_aligned < end) {
                        ret = mmu_l2_map(mmu, l1_pt, idx,
                                           l2_pt, ptr, l1_aligned, phys);
                        phys += (l1_aligned - ptr);
                        ptr = l1_aligned;
                } else {
                        ret = mmu_l2_map(mmu, l1_pt, idx,
                                           l2_pt, ptr, end, phys);
                        phys += (end - ptr);
                        ptr = end;
                }

                if (ret) {
                        dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

                        /* free all mapped pages */
                        free_mmu_map(mmu, start, ptr);

                        return -EINVAL;
                }
        } while (ptr < end && idx < ISP_L1PT_PTES);

        return 0;
}

/*
 * Update page table according to isp virtual address and page physical
 * address
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
                   phys_addr_t phys, unsigned int pgnr)
{
        unsigned int start, end;
        phys_addr_t l1_pt;
        int ret;

        mutex_lock(&mmu->pt_mutex);
        if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
                /*
                 * allocate 1 new page for L1 page table
                 */
                l1_pt = alloc_page_table(mmu);
                if (l1_pt == NULL_PAGE) {
                        dev_err(atomisp_dev, "alloc page table fail.\n");
                        mutex_unlock(&mmu->pt_mutex);
                        return -ENOMEM;
                }

                /*
                 * setup L1 page table physical addr to MMU
                 */
                mmu->base_address = l1_pt;
                mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
                memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
        }

        l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

        start = (isp_virt) & ISP_PAGE_MASK;
        end = start + (pgnr << ISP_PAGE_OFFSET);
        phys &= ISP_PAGE_MASK;

        ret = mmu_l1_map(mmu, l1_pt, start, end, phys);

        if (ret)
                dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

        mutex_unlock(&mmu->pt_mutex);
        return ret;
}
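
/*
 * Worked example (for illustration only), assuming the usual layout of
 * 4 KiB pages with 10-bit L1 and L2 indices (ISP_PAGE_OFFSET = 12,
 * ISP_L2PT_OFFSET = 12, ISP_L1PT_OFFSET = 22): mapping pgnr = 2 pages at
 * isp_virt = 0x00c01000 gives
 *
 *      start = 0x00c01000, end = start + (2 << 12) = 0x00c03000
 *      ISP_PTR_TO_L1_IDX(0x00c01000) = 0x00c01000 >> 22 = 3
 *      ISP_PTR_TO_L2_IDX(0x00c01000) = (0x00c01000 >> 12) & 0x3ff = 1
 *
 * so mmu_l1_map() visits L1 entry 3 and mmu_l2_map() fills L2 entries 1
 * and 2 of that table, one 4 KiB page each.  A single L1 entry spans
 * 1 << ISP_L1PT_OFFSET = 4 MiB of ISP virtual address space, so one pass
 * of the outer loop covers this whole range.
 */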

/*
 * Free L2 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
                           unsigned int l1_idx, phys_addr_t l2_pt,
                           unsigned int start, unsigned int end)
{
        unsigned int ptr;
        unsigned int idx;
        unsigned int pte;

        l2_pt &= ISP_PAGE_MASK;

        start = start & ISP_PAGE_MASK;
        end = ISP_PAGE_ALIGN(end);

        ptr = start;
        do {
                idx = ISP_PTR_TO_L2_IDX(ptr);

                pte = atomisp_get_pte(l2_pt, idx);

                if (!ISP_PTE_VALID(mmu, pte))
                        mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
                                                 l2_pt, idx, ptr, pte);

                atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
                mmu->l2_pgt_refcount[l1_idx]--;
                ptr += (1U << ISP_L2PT_OFFSET);
        } while (ptr < end && idx < ISP_L2PT_PTES - 1);

        if (mmu->l2_pgt_refcount[l1_idx] == 0) {
                free_page_table(mmu, l2_pt);
                atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
        }
}

/*
 * Free L1 page table according to isp virtual address and page physical
 * address
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
                           unsigned int start, unsigned int end)
{
        phys_addr_t l2_pt;
        unsigned int ptr, l1_aligned;
        unsigned int idx;
        unsigned int l2_pte;

        l1_pt &= ISP_PAGE_MASK;

        start = start & ISP_PAGE_MASK;
        end = ISP_PAGE_ALIGN(end);

        ptr = start;
        do {
                idx = ISP_PTR_TO_L1_IDX(ptr);

                l2_pte = atomisp_get_pte(l1_pt, idx);

                if (!ISP_PTE_VALID(mmu, l2_pte)) {
                        mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
                        /*
                         * advance to the next L1 entry, otherwise the loop
                         * would retry the same invalid entry forever
                         */
                        ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
                        continue;
                }

                l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

                l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

                if (l1_aligned < end) {
                        mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
                        ptr = l1_aligned;
                } else {
                        mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
                        ptr = end;
                }
                /*
                 * use the same L2 page next time, so we don't
                 * need to invalidate and free this PT.
                 */
        /*      atomisp_set_pte(l1_pt, idx, NULL_PTE); */
        } while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Free page table according to isp virtual address and page physical
 * address
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
                        unsigned int pgnr)
{
        unsigned int start, end;
        phys_addr_t l1_pt;

        mutex_lock(&mmu->pt_mutex);
        if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
                mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
                mutex_unlock(&mmu->pt_mutex);
                return;
        }

        l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

        start = (isp_virt) & ISP_PAGE_MASK;
        end = start + (pgnr << ISP_PAGE_OFFSET);

        mmu_l1_unmap(mmu, l1_pt, start, end);
        mutex_unlock(&mmu->pt_mutex);
}

/*
 * Free page tables according to isp start virtual address and end virtual
 * address.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
                                unsigned int end_isp_virt)
{
        unsigned int pgnr;
        unsigned int start, end;

        start = (start_isp_virt) & ISP_PAGE_MASK;
        end = (end_isp_virt) & ISP_PAGE_MASK;
        pgnr = (end - start) >> ISP_PAGE_OFFSET;
        mmu_unmap(mmu, start, pgnr);
}

int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
                phys_addr_t phys, unsigned int pgnr)
{
        return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
                   unsigned int pgnr)
{
        mmu_unmap(mmu, isp_virt, pgnr);
}

static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
                                              unsigned int start,
                                              unsigned int size)
{
        isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
        if (!mmu)               /* error */
                return -EINVAL;
        if (!driver)            /* error */
                return -EINVAL;

        if (!driver->name)
                dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

        mmu->driver = driver;

        if (!driver->tlb_flush_all) {
                dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
                return -EINVAL;
        }

        if (!driver->tlb_flush_range)
                driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

        if (!driver->pte_valid_mask) {
                dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
                return -EINVAL;
        }

        mmu->l1_pte = driver->null_pte;

        mutex_init(&mmu->pt_mutex);

        mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
                                           ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!mmu->tbl_cache)
                return -ENOMEM;

        return 0;
}
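
/*
 * Illustrative only, not part of the driver: a minimal sketch of the
 * client structure isp_mmu_init() expects, using only the fields this
 * file dereferences.  The callback names and the PTE encoding below
 * (page frame number in the low bits, a made-up "valid" bit 31) are
 * assumptions for the example; a real client, e.g. the Silicon Hive MMU
 * glue, supplies its own encoding and a hardware TLB flush.
 */
#define EXAMPLE_PTE_VALID_BIT   0x80000000

static unsigned int __maybe_unused example_phys_to_pte(struct isp_mmu *mmu,
                                                       phys_addr_t phys)
{
        /* example encoding: the PTE holds the page frame number */
        return (unsigned int)(phys >> ISP_PAGE_OFFSET);
}

static phys_addr_t __maybe_unused example_pte_to_phys(struct isp_mmu *mmu,
                                                      unsigned int pte)
{
        /* drop the valid bit and restore the page physical address */
        pte &= ~ISP_PTE_VALID_MASK(mmu);
        return (phys_addr_t)pte << ISP_PAGE_OFFSET;
}

static void __maybe_unused example_tlb_flush_all(struct isp_mmu *mmu)
{
        /* a real client would invalidate the ISP MMU TLB here */
}

static struct isp_mmu_client __maybe_unused example_mmu_client = {
        .name           = "example ISP MMU client",
        .pte_valid_mask = EXAMPLE_PTE_VALID_BIT,
        .null_pte       = 0,
        .tlb_flush_all  = example_tlb_flush_all,
        .phys_to_pte    = example_phys_to_pte,
        .pte_to_phys    = example_pte_to_phys,
        /* .tlb_flush_range is optional; a flush-all fallback is installed */
};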

/* Free L1 and L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
        unsigned int idx;
        unsigned int pte;
        phys_addr_t l1_pt, l2_pt;

        if (!mmu)
                return;

        if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
                dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
                            (unsigned int)mmu->l1_pte);
                return;
        }

        l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

        for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
                pte = atomisp_get_pte(l1_pt, idx);

                if (ISP_PTE_VALID(mmu, pte)) {
                        l2_pt = isp_pte_to_pgaddr(mmu, pte);

                        free_page_table(mmu, l2_pt);
                }
        }

        free_page_table(mmu, l1_pt);

        kmem_cache_destroy(mmu->tbl_cache);
}
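
/*
 * Illustrative only, not used by the driver: a minimal sketch of the
 * expected call sequence for this wrapper API.  The client pointer, the
 * ISP virtual address and the page physical address are assumed to come
 * from the caller (e.g. the hmm buffer object code); the names below are
 * made up for the example.
 */
static int __maybe_unused isp_mmu_usage_example(struct isp_mmu_client *client,
                                                unsigned int isp_virt,
                                                phys_addr_t page_phys)
{
        struct isp_mmu example_mmu;
        int ret;

        memset(&example_mmu, 0, sizeof(example_mmu));

        /* bind the platform specific callbacks to the wrapper */
        ret = isp_mmu_init(&example_mmu, client);
        if (ret)
                return ret;

        /* map one page at the given ISP virtual address */
        ret = isp_mmu_map(&example_mmu, isp_virt, page_phys, 1);
        if (ret)
                goto out;

        /* make sure the ISP does not keep using a stale translation */
        isp_mmu_flush_tlb(&example_mmu);

        /* ... let the ISP use the mapping ... */

        /* tear the mapping down again */
        isp_mmu_unmap(&example_mmu, isp_virt, 1);
out:
        /* free the L1/L2 page tables and the page table slab cache */
        isp_mmu_exit(&example_mmu);
        return ret;
}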