linux/drivers/staging/tidspbridge/core/tiomap3430.c
   1/*
   2 * tiomap.c
   3 *
   4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
   5 *
   6 * Processor Manager Driver for TI OMAP3430 EVM.
   7 *
   8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
   9 *
  10 * This package is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 *
  14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
  15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
   16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  17 */
  18
  19#include <linux/platform_data/dsp-omap.h>
  20
  21#include <linux/types.h>
  22/*  ----------------------------------- Host OS */
  23#include <dspbridge/host_os.h>
  24#include <linux/mm.h>
  25#include <linux/mmzone.h>
  26
  27/*  ----------------------------------- DSP/BIOS Bridge */
  28#include <dspbridge/dbdefs.h>
  29
  30/*  ----------------------------------- OS Adaptation Layer */
  31#include <dspbridge/drv.h>
  32#include <dspbridge/sync.h>
  33
  34/* ------------------------------------ Hardware Abstraction Layer */
  35#include <hw_defs.h>
  36#include <hw_mmu.h>
  37
  38/*  ----------------------------------- Link Driver */
  39#include <dspbridge/dspdefs.h>
  40#include <dspbridge/dspchnl.h>
  41#include <dspbridge/dspdeh.h>
  42#include <dspbridge/dspio.h>
  43#include <dspbridge/dspmsg.h>
  44#include <dspbridge/pwr.h>
  45#include <dspbridge/io_sm.h>
  46
  47/*  ----------------------------------- Platform Manager */
  48#include <dspbridge/dev.h>
  49#include <dspbridge/dspapi.h>
  50#include <dspbridge/dmm.h>
  51#include <dspbridge/wdt.h>
  52
  53/*  ----------------------------------- Local */
  54#include "_tiomap.h"
  55#include "_tiomap_pwr.h"
  56#include "tiomap_io.h"
  57
  58/* Offset in shared mem to write to in order to synchronize start with DSP */
  59#define SHMSYNCOFFSET 4         /* GPP byte offset */
  60
  61#define BUFFERSIZE 1024
  62
  63#define TIHELEN_ACKTIMEOUT  10000
  64
  65#define MMU_SECTION_ADDR_MASK    0xFFF00000
  66#define MMU_SSECTION_ADDR_MASK   0xFF000000
  67#define MMU_LARGE_PAGE_MASK      0xFFFF0000
  68#define MMU_SMALL_PAGE_MASK      0xFFFFF000
  69#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
  70#define PAGES_II_LVL_TABLE   512
  71#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
  72
  73/* IVA Boot modes */
  74#define DIRECT          0
  75#define IDLE            1
  76
  77/* Forward Declarations: */
  78static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
  79static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
  80                                  u8 *host_buff,
  81                                  u32 dsp_addr, u32 ul_num_bytes,
  82                                  u32 mem_type);
  83static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
  84                                   u32 dsp_addr);
  85static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
  86                                    int *board_state);
  87static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
  88static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
  89                                   u8 *host_buff,
  90                                   u32 dsp_addr, u32 ul_num_bytes,
  91                                   u32 mem_type);
  92static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
  93                                    u32 brd_state);
  94static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
  95                                   u32 dsp_dest_addr, u32 dsp_src_addr,
  96                                   u32 ul_num_bytes, u32 mem_type);
  97static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
  98                                    u8 *host_buff, u32 dsp_addr,
  99                                    u32 ul_num_bytes, u32 mem_type);
 100static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
 101                                  u32 ul_mpu_addr, u32 virt_addr,
 102                                  u32 ul_num_bytes, u32 ul_map_attr,
 103                                  struct page **mapped_pages);
 104static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
 105                                     u32 virt_addr, u32 ul_num_bytes);
 106static int bridge_dev_create(struct bridge_dev_context
 107                                        **dev_cntxt,
 108                                        struct dev_object *hdev_obj,
 109                                        struct cfg_hostres *config_param);
 110static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 111                                  u32 dw_cmd, void *pargs);
 112static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
 113static u32 user_va2_pa(struct mm_struct *mm, u32 address);
 114static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
 115                             u32 va, u32 size,
 116                             struct hw_mmu_map_attrs_t *map_attrs);
 117static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
 118                          u32 size, struct hw_mmu_map_attrs_t *attrs);
 119static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
 120                                  u32 ul_mpu_addr, u32 virt_addr,
 121                                  u32 ul_num_bytes,
 122                                  struct hw_mmu_map_attrs_t *hw_attrs);
 123
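/* Defined later in this file: wait_for_start polls the shared-memory sync
 * word until the DSP clears it or a timeout expires (assumed to be derived
 * from TIHELEN_ACKTIMEOUT, based on how it is used in bridge_brd_start()). */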
 124bool wait_for_start(struct bridge_dev_context *dev_context,
 125                        void __iomem *sync_addr);
 126
 127/*  ----------------------------------- Globals */
 128
 129/* Attributes of L2 page tables for DSP MMU */
 130struct page_info {
 131        u32 num_entries;        /* Number of valid PTEs in the L2 PT */
 132};
 133
 134/* Attributes used to manage the DSP MMU page tables */
 135struct pg_table_attrs {
 136        spinlock_t pg_lock;     /* Critical section object handle */
 137
 138        u32 l1_base_pa;         /* Physical address of the L1 PT */
 139        u32 l1_base_va;         /* Virtual  address of the L1 PT */
 140        u32 l1_size;            /* Size of the L1 PT */
 141        u32 l1_tbl_alloc_pa;
 142        /* Physical address of Allocated mem for L1 table. May not be aligned */
 143        u32 l1_tbl_alloc_va;
 144        /* Virtual address of Allocated mem for L1 table. May not be aligned */
 145        u32 l1_tbl_alloc_sz;
 146        /* Size of consistent memory allocated for L1 table.
 147         * May not be aligned */
 148
 149        u32 l2_base_pa;         /* Physical address of the L2 PT */
 150        u32 l2_base_va;         /* Virtual  address of the L2 PT */
 151        u32 l2_size;            /* Size of the L2 PT */
 152        u32 l2_tbl_alloc_pa;
 153        /* Physical address of Allocated mem for L2 table. May not be aligned */
 154        u32 l2_tbl_alloc_va;
 155        /* Virtual address of Allocated mem for L2 table. May not be aligned */
 156        u32 l2_tbl_alloc_sz;
 157        /* Size of consistent memory allocated for L2 table.
 158         * May not be aligned */
 159
 160        u32 l2_num_pages;       /* Number of allocated L2 PT */
 161        /* Array [l2_num_pages] of L2 PT info structs */
 162        struct page_info *pg_info;
 163};
 164
 165/*
 166 *  This Bridge driver's function interface table.
 167 */
 168static struct bridge_drv_interface drv_interface_fxns = {
 169        /* Bridge API ver. for which this bridge driver is built. */
 170        BRD_API_MAJOR_VERSION,
 171        BRD_API_MINOR_VERSION,
 172        bridge_dev_create,
 173        bridge_dev_destroy,
 174        bridge_dev_ctrl,
 175        bridge_brd_monitor,
 176        bridge_brd_start,
 177        bridge_brd_stop,
 178        bridge_brd_status,
 179        bridge_brd_read,
 180        bridge_brd_write,
 181        bridge_brd_set_state,
 182        bridge_brd_mem_copy,
 183        bridge_brd_mem_write,
 184        bridge_brd_mem_map,
 185        bridge_brd_mem_un_map,
 186        /* The following CHNL functions are provided by chnl_io.lib: */
 187        bridge_chnl_create,
 188        bridge_chnl_destroy,
 189        bridge_chnl_open,
 190        bridge_chnl_close,
 191        bridge_chnl_add_io_req,
 192        bridge_chnl_get_ioc,
 193        bridge_chnl_cancel_io,
 194        bridge_chnl_flush_io,
 195        bridge_chnl_get_info,
 196        bridge_chnl_get_mgr_info,
 197        bridge_chnl_idle,
 198        bridge_chnl_register_notify,
 199        /* The following IO functions are provided by chnl_io.lib: */
 200        bridge_io_create,
 201        bridge_io_destroy,
 202        bridge_io_on_loaded,
 203        bridge_io_get_proc_load,
 204        /* The following msg_ctrl functions are provided by chnl_io.lib: */
 205        bridge_msg_create,
 206        bridge_msg_create_queue,
 207        bridge_msg_delete,
 208        bridge_msg_delete_queue,
 209        bridge_msg_get,
 210        bridge_msg_put,
 211        bridge_msg_register_notify,
 212        bridge_msg_set_queue_id,
 213};
 214
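/* Mailbox notifier: io_mbox_msg() handles messages arriving from the DSP */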
 215static struct notifier_block dsp_mbox_notifier = {
 216        .notifier_call = io_mbox_msg,
 217};
 218
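/* Flush the entire DSP MMU TLB; wake the DSP first if it is hibernating so
 * the MMU registers are accessible. */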
 219static inline void flush_all(struct bridge_dev_context *dev_context)
 220{
 221        if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
 222            dev_context->brd_state == BRD_HIBERNATION)
 223                wake_dsp(dev_context, NULL);
 224
 225        hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
 226}
 227
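/* Dump diagnostics for a page whose reference count looks corrupted */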
 228static void bad_page_dump(u32 pa, struct page *pg)
 229{
 230        pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
 231        pr_emerg("Bad page state in process '%s'\n"
 232                 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
 233                 "Backtrace:\n",
 234                 current->comm, pg, (int)(2 * sizeof(unsigned long)),
 235                 (unsigned long)pg->flags, pg->mapping,
 236                 page_mapcount(pg), page_count(pg));
 237        dump_stack();
 238}
 239
 240/*
 241 *  ======== bridge_drv_entry ========
 242 *  purpose:
 243 *      Bridge Driver entry point.
 244 */
 245void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
 246                   const char *driver_file_name)
 247{
 248        if (strcmp(driver_file_name, "UMA") == 0)
 249                *drv_intf = &drv_interface_fxns;
 250        else
 251                dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
 252
 253}
 254
 255/*
 256 *  ======== bridge_brd_monitor ========
 257 *  purpose:
  258 *      bridge_brd_monitor puts the DSP into a loadable state,
  259 *      i.e. the application can load and start the device.
 260 *
 261 *  Preconditions:
 262 *      Device in 'OFF' state.
 263 */
 264static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 265{
 266        struct bridge_dev_context *dev_context = dev_ctxt;
 267        u32 temp;
 268        struct omap_dsp_platform_data *pdata =
 269                omap_dspbridge_dev->dev.platform_data;
 270
 271        temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
 272                                        OMAP_POWERSTATEST_MASK;
 273        if (!(temp & 0x02)) {
 274                /* IVA2 is not in ON state */
 275                /* Read and set PM_PWSTCTRL_IVA2  to ON */
 276                (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
 277                        PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
 278                /* Set the SW supervised state transition */
 279                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
 280                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 281
 282                /* Wait until the state has moved to ON */
 283                while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
 284                                                OMAP_INTRANSITION_MASK)
 285                        ;
 286                /* Disable Automatic transition */
 287                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 288                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 289        }
 290        (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 291                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 292        dsp_clk_enable(DSP_CLK_IVA2);
 293
 294        /* set the device state to IDLE */
 295        dev_context->brd_state = BRD_IDLE;
 296
 297        return 0;
 298}
 299
 300/*
 301 *  ======== bridge_brd_read ========
 302 *  purpose:
  303 *      Reads buffers from DSP memory.
 304 */
 305static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
 306                                  u8 *host_buff, u32 dsp_addr,
 307                                  u32 ul_num_bytes, u32 mem_type)
 308{
 309        int status = 0;
 310        struct bridge_dev_context *dev_context = dev_ctxt;
 311        u32 offset;
 312        u32 dsp_base_addr = dev_ctxt->dsp_base_addr;
 313
 314        if (dsp_addr < dev_context->dsp_start_add) {
 315                status = -EPERM;
 316                return status;
 317        }
 318        /* change here to account for the 3 bands of the DSP internal memory */
 319        if ((dsp_addr - dev_context->dsp_start_add) <
 320            dev_context->internal_size) {
 321                offset = dsp_addr - dev_context->dsp_start_add;
 322        } else {
 323                status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
 324                                           ul_num_bytes, mem_type);
 325                return status;
 326        }
 327        /* copy the data from DSP memory */
 328        memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
 329        return status;
 330}
 331
 332/*
 333 *  ======== bridge_brd_set_state ========
 334 *  purpose:
 335 *      This routine updates the Board status.
 336 */
 337static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
 338                                    u32 brd_state)
 339{
 340        int status = 0;
 341        struct bridge_dev_context *dev_context = dev_ctxt;
 342
 343        dev_context->brd_state = brd_state;
 344        return status;
 345}
 346
 347/*
 348 *  ======== bridge_brd_start ========
 349 *  purpose:
 350 *      Initializes DSP MMU and Starts DSP.
 351 *
 352 *  Preconditions:
 353 *  a) DSP domain is 'ACTIVE'.
 354 *  b) DSP_RST1 is asserted.
  355 *  c) DSP_RST2 is released.
 356 */
 357static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 358                                   u32 dsp_addr)
 359{
 360        int status = 0;
 361        struct bridge_dev_context *dev_context = dev_ctxt;
 362        void __iomem *sync_addr;
 363        u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
 364        u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
 365        u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
 366        u32 shm_sync_pa;
 367        /* Offset of shm_base_virt from tlb_base_virt */
 368        u32 ul_shm_offset_virt;
 369        s32 entry_ndx;
 370        s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
 371        struct cfg_hostres *resources = NULL;
 372        u32 temp;
 373        u32 ul_dsp_clk_rate;
 374        u32 ul_dsp_clk_addr;
 375        u32 ul_bios_gp_timer;
 376        u32 clk_cmd;
 377        struct io_mgr *hio_mgr;
 378        u32 ul_load_monitor_timer;
 379        u32 wdt_en = 0;
 380        struct omap_dsp_platform_data *pdata =
 381                omap_dspbridge_dev->dev.platform_data;
 382
 383        /* The device context contains all the mmu setup info from when the
 384         * last dsp base image was loaded. The first entry is always
 385         * SHMMEM base. */
 386        /* Get SHM_BEG - convert to byte address */
 387        (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
 388                             &ul_shm_base_virt);
 389        ul_shm_base_virt *= DSPWORDSIZE;
 390        /* DSP Virtual address */
 391        ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
 392        ul_shm_offset_virt =
 393            ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 394        /* Kernel logical address */
 395        ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
 396
 397        /* SHM physical sync address */
 398        shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt +
 399                        SHMSYNCOFFSET;
 400
  401        /* 2nd word is used as sync field */
 402        sync_addr = ioremap(shm_sync_pa, SZ_32);
 403        if (!sync_addr)
 404                return -ENOMEM;
 405
 406        /* Write a signature into the shm base + offset; this will
 407         * get cleared when the DSP program starts. */
 408        if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
 409                pr_err("%s: Illegal SM base\n", __func__);
 410                status = -EPERM;
 411        } else
 412                __raw_writel(0xffffffff, sync_addr);
 413
 414        if (!status) {
 415                resources = dev_context->resources;
 416                if (!resources)
 417                        status = -EPERM;
 418
  419                /* Assert RST1, i.e. reset only the DSP megacell */
 420                if (!status) {
 421                        (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
 422                                        OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
 423                                        OMAP2_RM_RSTCTRL);
 424
  425                        /* Mask the boot address to a 1K boundary for compatibility */
 426                        pdata->set_bootaddr(dsp_addr &
 427                                                OMAP3_IVA2_BOOTADDR_MASK);
 428                        pdata->set_bootmode(dsp_debug ? IDLE : DIRECT);
 429                }
 430        }
 431        if (!status) {
 432                /* Reset and Unreset the RST2, so that BOOTADDR is copied to
 433                 * IVA2 SYSC register */
 434                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
 435                        OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 436                udelay(100);
 437                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 438                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 439                udelay(100);
 440
  441                /* Disable the DSP MMU */
 442                hw_mmu_disable(resources->dmmu_base);
 443                /* Disable TWL */
 444                hw_mmu_twl_disable(resources->dmmu_base);
 445
 446                /* Only make TLB entry if both addresses are non-zero */
 447                for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
 448                     entry_ndx++) {
 449                        struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
 450                        struct hw_mmu_map_attrs_t map_attrs = {
 451                                .endianism = e->endianism,
 452                                .element_size = e->elem_size,
 453                                .mixed_size = e->mixed_mode,
 454                        };
 455
 456                        if (!e->gpp_pa || !e->dsp_va)
 457                                continue;
 458
 459                        dev_dbg(bridge,
 460                                        "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
 461                                        itmp_entry_ndx,
 462                                        e->gpp_pa,
 463                                        e->dsp_va,
 464                                        e->size);
 465
 466                        hw_mmu_tlb_add(dev_context->dsp_mmu_base,
 467                                        e->gpp_pa,
 468                                        e->dsp_va,
 469                                        e->size,
 470                                        itmp_entry_ndx,
 471                                        &map_attrs, 1, 1);
 472
 473                        itmp_entry_ndx++;
 474                }
 475        }
 476
 477        /* Lock the above TLB entries and get the BIOS and load monitor timer
 478         * information */
 479        if (!status) {
 480                hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
 481                hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
 482                hw_mmu_ttb_set(resources->dmmu_base,
 483                               dev_context->pt_attrs->l1_base_pa);
 484                hw_mmu_twl_enable(resources->dmmu_base);
 485                /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
 486
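                /* Offset 0x10 is assumed here to be the OMAP3 MMU_SYSCONFIG
                 * register; the read-modify-write below sets the smart-idle
                 * (bit 4) and AUTOIDLE (bit 0) bits. */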
 487                temp = __raw_readl((resources->dmmu_base) + 0x10);
 488                temp = (temp & 0xFFFFFFEF) | 0x11;
 489                __raw_writel(temp, (resources->dmmu_base) + 0x10);
 490
 491                /* Let the DSP MMU run */
 492                hw_mmu_enable(resources->dmmu_base);
 493
 494                /* Enable the BIOS clock */
 495                (void)dev_get_symbol(dev_context->dev_obj,
 496                                     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
 497                (void)dev_get_symbol(dev_context->dev_obj,
 498                                     BRIDGEINIT_LOADMON_GPTIMER,
 499                                     &ul_load_monitor_timer);
 500        }
 501
 502        if (!status) {
 503                if (ul_load_monitor_timer != 0xFFFF) {
 504                        clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 505                            ul_load_monitor_timer;
 506                        dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
 507                } else {
 508                        dev_dbg(bridge, "Not able to get the symbol for Load "
 509                                "Monitor Timer\n");
 510                }
 511        }
 512
 513        if (!status) {
 514                if (ul_bios_gp_timer != 0xFFFF) {
 515                        clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 516                            ul_bios_gp_timer;
 517                        dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
 518                } else {
 519                        dev_dbg(bridge,
 520                                "Not able to get the symbol for BIOS Timer\n");
 521                }
 522        }
 523
 524        if (!status) {
 525                /* Set the DSP clock rate */
 526                (void)dev_get_symbol(dev_context->dev_obj,
 527                                     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
  528                /* Set Autoidle Mode for IVA2 PLL */
 529                (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
 530                                OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
 531
 532                if ((unsigned int *)ul_dsp_clk_addr != NULL) {
 533                        /* Get the clock rate */
 534                        ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
 535                        dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
 536                                __func__, ul_dsp_clk_rate);
 537                        (void)bridge_brd_write(dev_context,
 538                                               (u8 *) &ul_dsp_clk_rate,
 539                                               ul_dsp_clk_addr, sizeof(u32), 0);
 540                }
 541                /*
 542                 * Enable Mailbox events and also drain any pending
 543                 * stale messages.
 544                 */
 545                dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
 546                if (IS_ERR(dev_context->mbox)) {
 547                        dev_context->mbox = NULL;
 548                        pr_err("%s: Failed to get dsp mailbox handle\n",
 549                                                                __func__);
 550                        status = -EPERM;
 551                }
 552
 553        }
 554        if (!status) {
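                /* Route PER-domain wakeup events to the IVA2 rather than the
                 * MPU, and make the PER domain sleep-dependent on IVA2
                 * (assumption based on the GRPSEL/SLEEPDEP register names
                 * noted in the comments below). */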
 555/*PM_IVA2GRPSEL_PER = 0xC0;*/
 556                temp = readl(resources->per_pm_base + 0xA8);
 557                temp = (temp & 0xFFFFFF30) | 0xC0;
 558                writel(temp, resources->per_pm_base + 0xA8);
 559
 560/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
 561                temp = readl(resources->per_pm_base + 0xA4);
 562                temp = (temp & 0xFFFFFF3F);
 563                writel(temp, resources->per_pm_base + 0xA4);
 564/*CM_SLEEPDEP_PER |= 0x04; */
 565                temp = readl(resources->per_base + 0x44);
 566                temp = (temp & 0xFFFFFFFB) | 0x04;
 567                writel(temp, resources->per_base + 0x44);
 568
 569/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
 570                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
 571                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 572
 573                /* Let DSP go */
 574                dev_dbg(bridge, "%s Unreset\n", __func__);
 575                /* Enable DSP MMU Interrupts */
 576                hw_mmu_event_enable(resources->dmmu_base,
 577                                    HW_MMU_ALL_INTERRUPTS);
 578                /* release the RST1, DSP starts executing now .. */
 579                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
 580                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 581
  582                dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", __raw_readl(sync_addr));
 583                dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
 584                if (dsp_debug)
 585                        while (__raw_readw(sync_addr))
 586                                ;
 587
 588                /* Wait for DSP to clear word in shared memory */
 589                /* Read the Location */
 590                if (!wait_for_start(dev_context, sync_addr))
 591                        status = -ETIMEDOUT;
 592
 593                dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
 594                if (wdt_en) {
 595                        /* Start wdt */
 596                        dsp_wdt_sm_set((void *)ul_shm_base);
 597                        dsp_wdt_enable(true);
 598                }
 599
 600                status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
 601                if (hio_mgr) {
 602                        io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
 603                        /* Write the synchronization bit to indicate the
 604                         * completion of OPP table update to DSP
 605                         */
 606                        __raw_writel(0XCAFECAFE, sync_addr);
 607
 608                        /* update board state */
 609                        dev_context->brd_state = BRD_RUNNING;
 610                        /* (void)chnlsm_enable_interrupt(dev_context); */
 611                } else {
 612                        dev_context->brd_state = BRD_UNKNOWN;
 613                }
 614        }
 615
 616        iounmap(sync_addr);
 617
 618        return status;
 619}
 620
 621/*
 622 *  ======== bridge_brd_stop ========
 623 *  purpose:
 624 *      Puts DSP in self loop.
 625 *
 626 *  Preconditions :
 627 *  a) None
 628 */
 629static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 630{
 631        int status = 0;
 632        struct bridge_dev_context *dev_context = dev_ctxt;
 633        struct pg_table_attrs *pt_attrs;
 634        u32 dsp_pwr_state;
 635        struct omap_dsp_platform_data *pdata =
 636                omap_dspbridge_dev->dev.platform_data;
 637
 638        if (dev_context->brd_state == BRD_STOPPED)
 639                return status;
 640
  641        /* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
  642         * mode before turning off the clocks. This ensures that there are no
  643         * pending L3 or other transactions from the IVA2 */
 644        dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
 645                                        OMAP_POWERSTATEST_MASK;
 646        if (dsp_pwr_state != PWRDM_POWER_OFF) {
 647                (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 648                                        OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 649                sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
 650                mdelay(10);
 651
 652                /* IVA2 is not in OFF state */
 653                /* Set PM_PWSTCTRL_IVA2  to OFF */
 654                (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
 655                        PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
 656                /* Set the SW supervised state transition for Sleep */
 657                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
 658                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 659        }
 660        udelay(10);
 661        /* Release the Ext Base virtual Address as the next DSP Program
 662         * may have a different load address */
 663        if (dev_context->dsp_ext_base_addr)
 664                dev_context->dsp_ext_base_addr = 0;
 665
 666        dev_context->brd_state = BRD_STOPPED;   /* update board state */
 667
 668        dsp_wdt_enable(false);
 669
 670        /* This is a good place to clear the MMU page tables as well */
 671        if (dev_context->pt_attrs) {
 672                pt_attrs = dev_context->pt_attrs;
 673                memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
 674                memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
 675                memset((u8 *) pt_attrs->pg_info, 0x00,
 676                       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
 677        }
 678        /* Disable the mailbox interrupts */
 679        if (dev_context->mbox) {
 680                omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
 681                omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
 682                dev_context->mbox = NULL;
 683        }
  684        /* Reset IVA2 clocks */
 685        (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
 686                        OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 687
 688        dsp_clock_disable_all(dev_context->dsp_per_clks);
 689        dsp_clk_disable(DSP_CLK_IVA2);
 690
 691        return status;
 692}
 693
 694/*
 695 *  ======== bridge_brd_status ========
 696 *      Returns the board status.
 697 */
 698static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
 699                                    int *board_state)
 700{
 701        struct bridge_dev_context *dev_context = dev_ctxt;
 702        *board_state = dev_context->brd_state;
 703        return 0;
 704}
 705
 706/*
 707 *  ======== bridge_brd_write ========
 708 *      Copies the buffers to DSP internal or external memory.
 709 */
 710static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
 711                                   u8 *host_buff, u32 dsp_addr,
 712                                   u32 ul_num_bytes, u32 mem_type)
 713{
 714        int status = 0;
 715        struct bridge_dev_context *dev_context = dev_ctxt;
 716
 717        if (dsp_addr < dev_context->dsp_start_add) {
 718                status = -EPERM;
 719                return status;
 720        }
 721        if ((dsp_addr - dev_context->dsp_start_add) <
 722            dev_context->internal_size) {
 723                status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
 724                                        ul_num_bytes, mem_type);
 725        } else {
 726                status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
 727                                            ul_num_bytes, mem_type, false);
 728        }
 729
 730        return status;
 731}
 732
 733/*
 734 *  ======== bridge_dev_create ========
 735 *      Creates a driver object. Puts DSP in self loop.
 736 */
 737static int bridge_dev_create(struct bridge_dev_context
 738                                        **dev_cntxt,
 739                                        struct dev_object *hdev_obj,
 740                                        struct cfg_hostres *config_param)
 741{
 742        int status = 0;
 743        struct bridge_dev_context *dev_context = NULL;
 744        s32 entry_ndx;
 745        struct cfg_hostres *resources = config_param;
 746        struct pg_table_attrs *pt_attrs;
 747        u32 pg_tbl_pa;
 748        u32 pg_tbl_va;
 749        u32 align_size;
 750        struct drv_data *drv_datap = dev_get_drvdata(bridge);
 751
 752        /* Allocate and initialize a data structure to contain the bridge driver
 753         *  state, which becomes the context for later calls into this driver */
 754        dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
 755        if (!dev_context) {
 756                status = -ENOMEM;
 757                goto func_end;
 758        }
 759
 760        dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
 761        dev_context->self_loop = (u32) NULL;
 762        dev_context->dsp_per_clks = 0;
 763        dev_context->internal_size = OMAP_DSP_SIZE;
 764        /*  Clear dev context MMU table entries.
 765         *  These get set on bridge_io_on_loaded() call after program loaded. */
 766        for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
 767                dev_context->atlb_entry[entry_ndx].gpp_pa =
 768                    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
 769        }
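        /* mem_base[3]/mem_length[3] are assumed to describe the host mapping
         * of the DSP internal memory, used here as dsp_base_addr */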
 770        dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
 771                                                                 (config_param->
 772                                                                  mem_base
 773                                                                  [3]),
 774                                                                 config_param->
 775                                                                 mem_length
 776                                                                 [3]);
 777        if (!dev_context->dsp_base_addr)
 778                status = -EPERM;
 779
 780        pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
 781        if (pt_attrs != NULL) {
 782                pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
 783                align_size = pt_attrs->l1_size;
 784                /* Align sizes are expected to be power of 2 */
 785                /* we like to get aligned on L1 table size */
 786                pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
 787                                                     align_size, &pg_tbl_pa);
 788
 789                /* Check if the PA is aligned for us */
 790                if ((pg_tbl_pa) & (align_size - 1)) {
  791                        /* PA not aligned to page table size,
  792                         * try a larger allocation and align within it */
 793                        mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
 794                                          pt_attrs->l1_size);
 795                        /* we like to get aligned on L1 table size */
 796                        pg_tbl_va =
 797                            (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
 798                                                     align_size, &pg_tbl_pa);
 799                        /* We should be able to get aligned table now */
 800                        pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
 801                        pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
 802                        pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
 803                        /* Align the PA to the next 'align'  boundary */
 804                        pt_attrs->l1_base_pa =
 805                            ((pg_tbl_pa) +
 806                             (align_size - 1)) & (~(align_size - 1));
 807                        pt_attrs->l1_base_va =
 808                            pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
 809                } else {
 810                        /* We got aligned PA, cool */
 811                        pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
 812                        pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
 813                        pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
 814                        pt_attrs->l1_base_pa = pg_tbl_pa;
 815                        pt_attrs->l1_base_va = pg_tbl_va;
 816                }
 817                if (pt_attrs->l1_base_va)
 818                        memset((u8 *) pt_attrs->l1_base_va, 0x00,
 819                               pt_attrs->l1_size);
 820
  821                /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
  822                 * L4 pages */
 823                pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
 824                pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
 825                    pt_attrs->l2_num_pages;
 826                align_size = 4; /* Make it u32 aligned */
  827                /* allocate the pool of L2 page tables */
 828                pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
 829                                                     align_size, &pg_tbl_pa);
 830                pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
 831                pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
 832                pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
 833                pt_attrs->l2_base_pa = pg_tbl_pa;
 834                pt_attrs->l2_base_va = pg_tbl_va;
 835
 836                if (pt_attrs->l2_base_va)
 837                        memset((u8 *) pt_attrs->l2_base_va, 0x00,
 838                               pt_attrs->l2_size);
 839
 840                pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
 841                                        sizeof(struct page_info), GFP_KERNEL);
 842                dev_dbg(bridge,
 843                        "L1 pa %x, va %x, size %x\n L2 pa %x, va "
 844                        "%x, size %x\n", pt_attrs->l1_base_pa,
 845                        pt_attrs->l1_base_va, pt_attrs->l1_size,
 846                        pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
 847                        pt_attrs->l2_size);
 848                dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
 849                        pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
 850        }
 851        if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
 852            (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
 853                dev_context->pt_attrs = pt_attrs;
 854        else
 855                status = -ENOMEM;
 856
 857        if (!status) {
 858                spin_lock_init(&pt_attrs->pg_lock);
 859                dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
 860
 861                /* Set the Clock Divisor for the DSP module */
 862                udelay(5);
 863                /* MMU address is obtained from the host
 864                 * resources struct */
 865                dev_context->dsp_mmu_base = resources->dmmu_base;
 866        }
 867        if (!status) {
 868                dev_context->dev_obj = hdev_obj;
 869                /* Store current board state. */
 870                dev_context->brd_state = BRD_UNKNOWN;
 871                dev_context->resources = resources;
 872                dsp_clk_enable(DSP_CLK_IVA2);
 873                bridge_brd_stop(dev_context);
 874                /* Return ptr to our device state to the DSP API for storage */
 875                *dev_cntxt = dev_context;
 876        } else {
 877                if (pt_attrs != NULL) {
 878                        kfree(pt_attrs->pg_info);
 879
 880                        if (pt_attrs->l2_tbl_alloc_va) {
 881                                mem_free_phys_mem((void *)
 882                                                  pt_attrs->l2_tbl_alloc_va,
 883                                                  pt_attrs->l2_tbl_alloc_pa,
 884                                                  pt_attrs->l2_tbl_alloc_sz);
 885                        }
 886                        if (pt_attrs->l1_tbl_alloc_va) {
 887                                mem_free_phys_mem((void *)
 888                                                  pt_attrs->l1_tbl_alloc_va,
 889                                                  pt_attrs->l1_tbl_alloc_pa,
 890                                                  pt_attrs->l1_tbl_alloc_sz);
 891                        }
 892                }
 893                kfree(pt_attrs);
 894                kfree(dev_context);
 895        }
 896func_end:
 897        return status;
 898}
 899
 900/*
 901 *  ======== bridge_dev_ctrl ========
 902 *      Receives device specific commands.
 903 */
 904static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 905                                  u32 dw_cmd, void *pargs)
 906{
 907        int status = 0;
 908        struct bridge_ioctl_extproc *pa_ext_proc =
 909                                        (struct bridge_ioctl_extproc *)pargs;
 910        s32 ndx;
 911
 912        switch (dw_cmd) {
 913        case BRDIOCTL_CHNLREAD:
 914                break;
 915        case BRDIOCTL_CHNLWRITE:
 916                break;
 917        case BRDIOCTL_SETMMUCONFIG:
 918                /* store away dsp-mmu setup values for later use */
 919                for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
 920                        dev_context->atlb_entry[ndx] = *pa_ext_proc;
 921                break;
 922        case BRDIOCTL_DEEPSLEEP:
 923        case BRDIOCTL_EMERGENCYSLEEP:
  924                /* Currently only DSP Idle is supported. Need to update for
  925                 * later releases */
 926                status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
 927                break;
 928        case BRDIOCTL_WAKEUP:
 929                status = wake_dsp(dev_context, pargs);
 930                break;
 931        case BRDIOCTL_CLK_CTRL:
 932                status = 0;
 933                /* Looking For Baseport Fix for Clocks */
 934                status = dsp_peripheral_clk_ctrl(dev_context, pargs);
 935                break;
 936        case BRDIOCTL_PWR_HIBERNATE:
 937                status = handle_hibernation_from_dsp(dev_context);
 938                break;
 939        case BRDIOCTL_PRESCALE_NOTIFY:
 940                status = pre_scale_dsp(dev_context, pargs);
 941                break;
 942        case BRDIOCTL_POSTSCALE_NOTIFY:
 943                status = post_scale_dsp(dev_context, pargs);
 944                break;
 945        case BRDIOCTL_CONSTRAINT_REQUEST:
 946                status = handle_constraints_set(dev_context, pargs);
 947                break;
 948        default:
 949                status = -EPERM;
 950                break;
 951        }
 952        return status;
 953}
 954
 955/*
 956 *  ======== bridge_dev_destroy ========
 957 *      Destroys the driver object.
 958 */
 959static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 960{
 961        struct pg_table_attrs *pt_attrs;
 962        int status = 0;
 963        struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 964            dev_ctxt;
 965        struct cfg_hostres *host_res;
 966        u32 shm_size;
 967        struct drv_data *drv_datap = dev_get_drvdata(bridge);
 968
  969        /* This should never happen */
 970        if (!dev_ctxt)
 971                return -EFAULT;
 972
 973        /* first put the device to stop state */
 974        bridge_brd_stop(dev_context);
 975        if (dev_context->pt_attrs) {
 976                pt_attrs = dev_context->pt_attrs;
 977                kfree(pt_attrs->pg_info);
 978
 979                if (pt_attrs->l2_tbl_alloc_va) {
 980                        mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
 981                                          pt_attrs->l2_tbl_alloc_pa,
 982                                          pt_attrs->l2_tbl_alloc_sz);
 983                }
 984                if (pt_attrs->l1_tbl_alloc_va) {
 985                        mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
 986                                          pt_attrs->l1_tbl_alloc_pa,
 987                                          pt_attrs->l1_tbl_alloc_sz);
 988                }
 989                kfree(pt_attrs);
 990
 991        }
 992
 993        if (dev_context->resources) {
 994                host_res = dev_context->resources;
 995                shm_size = drv_datap->shm_size;
 996                if (shm_size >= 0x10000) {
 997                        if ((host_res->mem_base[1]) &&
 998                            (host_res->mem_phys[1])) {
 999                                mem_free_phys_mem((void *)
1000                                                  host_res->mem_base
1001                                                  [1],
1002                                                  host_res->mem_phys
1003                                                  [1], shm_size);
1004                        }
1005                } else {
1006                        dev_dbg(bridge, "%s: Error getting shm size "
1007                                "from registry: %x. Not calling "
1008                                "mem_free_phys_mem\n", __func__,
1009                                status);
1010                }
1011                host_res->mem_base[1] = 0;
1012                host_res->mem_phys[1] = 0;
1013
1014                if (host_res->mem_base[0])
1015                        iounmap((void *)host_res->mem_base[0]);
1016                if (host_res->mem_base[2])
1017                        iounmap((void *)host_res->mem_base[2]);
1018                if (host_res->mem_base[3])
1019                        iounmap((void *)host_res->mem_base[3]);
1020                if (host_res->mem_base[4])
1021                        iounmap((void *)host_res->mem_base[4]);
1022                if (host_res->dmmu_base)
1023                        iounmap(host_res->dmmu_base);
1024                if (host_res->per_base)
1025                        iounmap(host_res->per_base);
1026                if (host_res->per_pm_base)
1027                        iounmap((void *)host_res->per_pm_base);
1028                if (host_res->core_pm_base)
1029                        iounmap((void *)host_res->core_pm_base);
1030
1031                host_res->mem_base[0] = (u32) NULL;
1032                host_res->mem_base[2] = (u32) NULL;
1033                host_res->mem_base[3] = (u32) NULL;
1034                host_res->mem_base[4] = (u32) NULL;
1035                host_res->dmmu_base = NULL;
1036
1037                kfree(host_res);
1038        }
1039
1040        /* Free the driver's device context: */
1041        kfree(drv_datap->base_img);
1042        kfree((void *)dev_ctxt);
1043        return status;
1044}
1045
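/*
 *  ======== bridge_brd_mem_copy ========
 *      Copies data between DSP addresses, staging it through a small
 *      host-side buffer one BUFFERSIZE chunk at a time.
 */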
1046static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1047                                   u32 dsp_dest_addr, u32 dsp_src_addr,
1048                                   u32 ul_num_bytes, u32 mem_type)
1049{
1050        int status = 0;
1051        u32 src_addr = dsp_src_addr;
1052        u32 dest_addr = dsp_dest_addr;
1053        u32 copy_bytes = 0;
1054        u32 total_bytes = ul_num_bytes;
1055        u8 host_buf[BUFFERSIZE];
1056        struct bridge_dev_context *dev_context = dev_ctxt;
1057        while (total_bytes > 0 && !status) {
1058                copy_bytes =
1059                    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
1060                /* Read from External memory */
1061                status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1062                                           copy_bytes, mem_type);
1063                if (!status) {
1064                        if (dest_addr < (dev_context->dsp_start_add +
1065                                         dev_context->internal_size)) {
1066                                /* Write to Internal memory */
1067                                status = write_dsp_data(dev_ctxt, host_buf,
1068                                                        dest_addr, copy_bytes,
1069                                                        mem_type);
1070                        } else {
1071                                /* Write to External memory */
1072                                status =
1073                                    write_ext_dsp_data(dev_ctxt, host_buf,
1074                                                       dest_addr, copy_bytes,
1075                                                       mem_type, false);
1076                        }
1077                }
1078                total_bytes -= copy_bytes;
1079                src_addr += copy_bytes;
1080                dest_addr += copy_bytes;
1081        }
1082        return status;
1083}
1084
 1085/* Mem Write does not halt the DSP while writing, unlike bridge_brd_write */
1086static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1087                                    u8 *host_buff, u32 dsp_addr,
1088                                    u32 ul_num_bytes, u32 mem_type)
1089{
1090        int status = 0;
1091        struct bridge_dev_context *dev_context = dev_ctxt;
1092        u32 ul_remain_bytes = 0;
1093        u32 ul_bytes = 0;
1094        ul_remain_bytes = ul_num_bytes;
1095        while (ul_remain_bytes > 0 && !status) {
1096                ul_bytes =
1097                    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1098                if (dsp_addr < (dev_context->dsp_start_add +
1099                                 dev_context->internal_size)) {
1100                        status =
1101                            write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1102                                           ul_bytes, mem_type);
1103                } else {
1104                        status = write_ext_dsp_data(dev_ctxt, host_buff,
1105                                                    dsp_addr, ul_bytes,
1106                                                    mem_type, true);
1107                }
1108                ul_remain_bytes -= ul_bytes;
1109                dsp_addr += ul_bytes;
1110                host_buff = host_buff + ul_bytes;
1111        }
1112        return status;
1113}
1114
1115/*
1116 *  ======== bridge_brd_mem_map ========
1117 *      This function maps MPU buffer to the DSP address space. It performs
1118 *  linear to physical address translation if required. It translates each
 1119 *  page since linear addresses can be physically non-contiguous.
 1120 *  All address & size arguments are assumed to be page aligned (in proc.c).
1121 *
1122 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1123 */
1124static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1125                                  u32 ul_mpu_addr, u32 virt_addr,
1126                                  u32 ul_num_bytes, u32 ul_map_attr,
1127                                  struct page **mapped_pages)
1128{
1129        u32 attrs;
1130        int status = 0;
1131        struct bridge_dev_context *dev_context = dev_ctxt;
1132        struct hw_mmu_map_attrs_t hw_attrs;
1133        struct vm_area_struct *vma;
1134        struct mm_struct *mm = current->mm;
1135        u32 write = 0;
1136        u32 num_usr_pgs = 0;
1137        struct page *mapped_page, *pg;
1138        s32 pg_num;
1139        u32 va = virt_addr;
1140        struct task_struct *curr_task = current;
1141        u32 pg_i = 0;
1142        u32 mpu_addr, pa;
1143
1144        dev_dbg(bridge,
1145                "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1146                __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1147                ul_map_attr);
1148        if (ul_num_bytes == 0)
1149                return -EINVAL;
1150
1151        if (ul_map_attr & DSP_MAP_DIR_MASK) {
1152                attrs = ul_map_attr;
1153        } else {
1154                /* Assign default attributes */
1155                attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1156        }
1157        /* Take mapping properties */
1158        if (attrs & DSP_MAPBIGENDIAN)
1159                hw_attrs.endianism = HW_BIG_ENDIAN;
1160        else
1161                hw_attrs.endianism = HW_LITTLE_ENDIAN;
1162
1163        hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1164            ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1165        /* Ignore element_size if mixed_size is enabled */
1166        if (hw_attrs.mixed_size == 0) {
1167                if (attrs & DSP_MAPELEMSIZE8) {
1168                        /* Size is 8 bit */
1169                        hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1170                } else if (attrs & DSP_MAPELEMSIZE16) {
1171                        /* Size is 16 bit */
1172                        hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1173                } else if (attrs & DSP_MAPELEMSIZE32) {
1174                        /* Size is 32 bit */
1175                        hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1176                } else if (attrs & DSP_MAPELEMSIZE64) {
1177                        /* Size is 64 bit */
1178                        hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1179                } else {
1180                        /*
1181                         * Mixedsize isn't enabled, so size can't be
1182                         * zero here
1183                         */
1184                        return -EINVAL;
1185                }
1186        }
1187        if (attrs & DSP_MAPDONOTLOCK)
1188                hw_attrs.donotlockmpupage = 1;
1189        else
1190                hw_attrs.donotlockmpupage = 0;
1191
1192        if (attrs & DSP_MAPVMALLOCADDR) {
1193                return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1194                                       ul_num_bytes, &hw_attrs);
1195        }
1196        /*
1197         * Do OS-specific user-va to pa translation.
1198         * Combine physically contiguous regions to reduce TLBs.
1199         * Pass the translated pa to pte_update.
1200         */
1201        if ((attrs & DSP_MAPPHYSICALADDR)) {
1202                status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1203                                    ul_num_bytes, &hw_attrs);
1204                goto func_cont;
1205        }
1206
1207        /*
1208         * Important Note: ul_mpu_addr is mapped from user application process
1209         * to current process - it must lie completely within the current
1210         * virtual memory address space in order to be of use to us here!
1211         */
1212        down_read(&mm->mmap_sem);
1213        vma = find_vma(mm, ul_mpu_addr);
1214        if (vma)
1215                dev_dbg(bridge,
 1216                        "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1217                        "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1218                        ul_num_bytes, vma->vm_start, vma->vm_end,
1219                        vma->vm_flags);
1220
1221        /*
1222         * It is observed that under some circumstances, the user buffer is
1223         * spread across several VMAs. So loop through and check if the entire
1224         * user buffer is covered
1225         */
1226        while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1227                /* jump to the next VMA region */
1228                vma = find_vma(mm, vma->vm_end + 1);
1229                dev_dbg(bridge,
1230                        "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1231                        "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1232                        ul_num_bytes, vma->vm_start, vma->vm_end,
1233                        vma->vm_flags);
1234        }
1235        if (!vma) {
1236                pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1237                       __func__, ul_mpu_addr, ul_num_bytes);
1238                status = -EINVAL;
1239                up_read(&mm->mmap_sem);
1240                goto func_cont;
1241        }
1242
1243        if (vma->vm_flags & VM_IO) {
1244                num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1245                mpu_addr = ul_mpu_addr;
1246
1247                /* Get the physical addresses for user buffer */
1248                for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1249                        pa = user_va2_pa(mm, mpu_addr);
1250                        if (!pa) {
1251                                status = -EPERM;
1252                                pr_err("DSPBRIDGE: VM_IO mapping physical "
1253                                       "address is invalid\n");
1254                                break;
1255                        }
1256                        if (pfn_valid(__phys_to_pfn(pa))) {
1257                                pg = PHYS_TO_PAGE(pa);
1258                                get_page(pg);
1259                                if (page_count(pg) < 1) {
1260                                        pr_err("Bad page in VM_IO buffer\n");
1261                                        bad_page_dump(pa, pg);
1262                                }
1263                        }
1264                        status = pte_set(dev_context->pt_attrs, pa,
1265                                         va, HW_PAGE_SIZE4KB, &hw_attrs);
1266                        if (status)
1267                                break;
1268
1269                        va += HW_PAGE_SIZE4KB;
1270                        mpu_addr += HW_PAGE_SIZE4KB;
1271                        pa += HW_PAGE_SIZE4KB;
1272                }
1273        } else {
1274                num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1275                if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1276                        write = 1;
1277
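                    /*
                     * Fault in and pin one user page per iteration with
                     * get_user_pages(); its physical address is then entered
                     * into the DSP page tables. The reference taken here is
                     * dropped again on unmap in bridge_brd_mem_un_map().
                     */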
1278                for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1279                        pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1280                                                write, 1, &mapped_page, NULL);
1281                        if (pg_num > 0) {
1282                                if (page_count(mapped_page) < 1) {
1283                                        pr_err("Bad page count after doing "
1284                                               "get_user_pages on "
1285                                               "user buffer\n");
1286                                        bad_page_dump(page_to_phys(mapped_page),
1287                                                      mapped_page);
1288                                }
1289                                status = pte_set(dev_context->pt_attrs,
1290                                                 page_to_phys(mapped_page), va,
1291                                                 HW_PAGE_SIZE4KB, &hw_attrs);
1292                                if (status)
1293                                        break;
1294
1295                                if (mapped_pages)
1296                                        mapped_pages[pg_i] = mapped_page;
1297
1298                                va += HW_PAGE_SIZE4KB;
1299                                ul_mpu_addr += HW_PAGE_SIZE4KB;
1300                        } else {
1301                                pr_err("DSPBRIDGE: get_user_pages FAILED, "
1302                                       "MPU addr = 0x%x, "
1303                                       "vma->vm_flags = 0x%lx, "
1304                                       "get_user_pages Err "
1305                                       "Value = %d, Buffer "
1306                                       "size=0x%x\n", ul_mpu_addr,
1307                                       vma->vm_flags, pg_num, ul_num_bytes);
1308                                status = -EPERM;
1309                                break;
1310                        }
1311                }
1312        }
1313        up_read(&mm->mmap_sem);
1314func_cont:
1315        if (status) {
1316                /*
1317                 * Roll back the pages mapped so far in case the mapping
1318                 * failed partway through
1319                 */
1320                if (pg_i) {
1321                        bridge_brd_mem_un_map(dev_context, virt_addr,
1322                                           (pg_i * PG_SIZE4K));
1323                }
1324                status = -EPERM;
1325        }
1326        /*
1327         * In any case, flush the TLB.
1328         * This is done here rather than in pte_update to avoid flushing
1329         * repeatedly while mapping non-contiguous physical regions of a
1330         * virtual region.
1331         */
1332        flush_all(dev_context);
1333        dev_dbg(bridge, "%s status %x\n", __func__, status);
1334        return status;
1335}
1336
1337/*
1338 *  ======== bridge_brd_mem_un_map ========
1339 *      Invalidate the PTEs for the DSP VA block to be unmapped.
1340 *
1341 *      PTEs of a mapped memory block are contiguous in any page table.
1342 *      So, instead of looking up the PTE address for every 4K block,
1343 *      we clear consecutive PTEs until all the bytes are unmapped.
1344 */
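    /*
     * For example (illustrative): a 1 MB block that was mapped with 4 KB
     * pages occupies 256 consecutive L2 PTEs (one full 1 KB coarse table),
     * so it is torn down by clearing those 256 entries in sequence rather
     * than looking up each one separately.
     */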
1345static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1346                                     u32 virt_addr, u32 ul_num_bytes)
1347{
1348        u32 l1_base_va;
1349        u32 l2_base_va;
1350        u32 l2_base_pa;
1351        u32 l2_page_num;
1352        u32 pte_val;
1353        u32 pte_size;
1354        u32 pte_count;
1355        u32 pte_addr_l1;
1356        u32 pte_addr_l2 = 0;
1357        u32 rem_bytes;
1358        u32 rem_bytes_l2;
1359        u32 va_curr;
1360        struct page *pg = NULL;
1361        int status = 0;
1362        struct bridge_dev_context *dev_context = dev_ctxt;
1363        struct pg_table_attrs *pt = dev_context->pt_attrs;
1364        u32 temp;
1365        u32 paddr;
1366        u32 numof4k_pages = 0;
1367
1368        va_curr = virt_addr;
1369        rem_bytes = ul_num_bytes;
1370        rem_bytes_l2 = 0;
1371        l1_base_va = pt->l1_base_va;
1372        pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1373        dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1374                "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1375                ul_num_bytes, l1_base_va, pte_addr_l1);
1376
1377        while (rem_bytes && !status) {
1378                u32 va_curr_orig = va_curr;
1379                /* Find whether the L1 PTE points to a valid L2 PT */
1380                pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1381                pte_val = *(u32 *) pte_addr_l1;
1382                pte_size = hw_mmu_pte_size_l1(pte_val);
1383
1384                if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1385                        goto skip_coarse_page;
1386
1387                /*
1388                 * Get the L2 PA from the L1 PTE, and find
1389                 * corresponding L2 VA
1390                 */
1391                l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1392                l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1393                l2_page_num =
1394                    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1395                /*
1396                 * Find the L2 PTE address from which we will start
1397                 * clearing, the number of PTEs to be cleared on this
1398                 * page, and the size of VA space that needs to be
1399                 * cleared on this L2 page
1400                 */
1401                pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1402                pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1403                pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1404                if (rem_bytes < (pte_count * PG_SIZE4K))
1405                        pte_count = rem_bytes / PG_SIZE4K;
1406                rem_bytes_l2 = pte_count * PG_SIZE4K;
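                    /*
                     * pte_count is now the number of PTE slots left in this
                     * 1 KB L2 table starting at va_curr, capped by the bytes
                     * still to be unmapped. E.g. if va_curr falls on slot 200
                     * of 256, the byte offset is 800 and at most
                     * (1024 - 800) / 4 = 56 slots can be cleared here.
                     */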
1407
1408                /*
1409                 * Unmap the VA space on this L2 PT. A quicker way
1410                 * would be to clear pte_count entries starting from
1411                 * pte_addr_l2. However, the code below checks that we do
1412                 * not clear invalid entries, and that we never clear less
1413                 * than 64KB for a 64KB entry. Similar checking is done
1414                 * for the L1 PTEs below.
1415                 */
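                    /*
                     * A 64 KB "large page" entry is replicated across 16
                     * consecutive L2 slots in the ARM page-table format,
                     * which is why it may only be cleared as a whole and why
                     * pte_addr_l2 is advanced by (pte_size >> 12) slots per
                     * cleared entry.
                     */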
1416                while (rem_bytes_l2 && !status) {
1417                        pte_val = *(u32 *) pte_addr_l2;
1418                        pte_size = hw_mmu_pte_size_l2(pte_val);
1419                        /* va_curr aligned to pte_size? */
1420                        if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1421                            va_curr & (pte_size - 1)) {
1422                                status = -EPERM;
1423                                break;
1424                        }
1425
1426                        /* Collect Physical addresses from VA */
1427                        paddr = (pte_val & ~(pte_size - 1));
1428                        if (pte_size == HW_PAGE_SIZE64KB)
1429                                numof4k_pages = 16;
1430                        else
1431                                numof4k_pages = 1;
1432                        temp = 0;
1433                        while (temp++ < numof4k_pages) {
1434                                if (!pfn_valid(__phys_to_pfn(paddr))) {
1435                                        paddr += HW_PAGE_SIZE4KB;
1436                                        continue;
1437                                }
1438                                pg = PHYS_TO_PAGE(paddr);
1439                                if (page_count(pg) < 1) {
1440                                        pr_info("DSPBRIDGE: UNMAP function: "
1441                                                "COUNT 0 FOR PA 0x%x, size = "
1442                                                "0x%x\n", paddr, ul_num_bytes);
1443                                        bad_page_dump(paddr, pg);
1444                                } else {
1445                                        set_page_dirty(pg);
1446                                        page_cache_release(pg);
1447                                }
1448                                paddr += HW_PAGE_SIZE4KB;
1449                        }
1450                        if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1451                                status = -EPERM;
1452                                goto EXIT_LOOP;
1453                        }
1454
1455                        status = 0;
1456                        rem_bytes_l2 -= pte_size;
1457                        va_curr += pte_size;
1458                        pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1459                }
1460                spin_lock(&pt->pg_lock);
1461                if (rem_bytes_l2 == 0) {
1462                        pt->pg_info[l2_page_num].num_entries -= pte_count;
1463                        if (pt->pg_info[l2_page_num].num_entries == 0) {
1464                                /*
1465                                 * Clear the L1 PTE pointing to the L2 PT
1466                                 */
1467                                if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1468                                                     HW_MMU_COARSE_PAGE_SIZE))
1469                                        status = 0;
1470                                else {
1471                                        status = -EPERM;
1472                                        spin_unlock(&pt->pg_lock);
1473                                        goto EXIT_LOOP;
1474                                }
1475                        }
1476                        rem_bytes -= pte_count * PG_SIZE4K;
1477                } else
1478                        status = -EPERM;
1479
1480                spin_unlock(&pt->pg_lock);
1481                continue;
1482skip_coarse_page:
1483                /* va_curr aligned to pte_size? */
1484                /* pte_size = 1 MB or 16 MB */
1485                if (pte_size == 0 || rem_bytes < pte_size ||
1486                    va_curr & (pte_size - 1)) {
1487                        status = -EPERM;
1488                        break;
1489                }
1490
1491                if (pte_size == HW_PAGE_SIZE1MB)
1492                        numof4k_pages = 256;
1493                else
1494                        numof4k_pages = 4096;
1495                temp = 0;
1496                /* Collect Physical addresses from VA */
1497                paddr = (pte_val & ~(pte_size - 1));
1498                while (temp++ < numof4k_pages) {
1499                        if (pfn_valid(__phys_to_pfn(paddr))) {
1500                                pg = PHYS_TO_PAGE(paddr);
1501                                if (page_count(pg) < 1) {
1502                                        pr_info("DSPBRIDGE: UNMAP function: "
1503                                                "COUNT 0 FOR PA 0x%x, size = "
1504                                                "0x%x\n", paddr, ul_num_bytes);
1505                                        bad_page_dump(paddr, pg);
1506                                } else {
1507                                        set_page_dirty(pg);
1508                                        page_cache_release(pg);
1509                                }
1510                        }
1511                        paddr += HW_PAGE_SIZE4KB;
1512                }
1513                if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1514                        status = 0;
1515                        rem_bytes -= pte_size;
1516                        va_curr += pte_size;
1517                } else {
1518                        status = -EPERM;
1519                        goto EXIT_LOOP;
1520                }
1521        }
1522        /*
1523         * It is better to flush the TLB here, so that any stale entries
1524         * are flushed out
1525         */
1526EXIT_LOOP:
1527        flush_all(dev_context);
1528        dev_dbg(bridge,
1529                "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1530                " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1531                pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1532        return status;
1533}
1534
1535/*
1536 *  ======== user_va2_pa ========
1537 *  Purpose:
1538 *      This function walks through the page tables to convert a userland
1539 *      virtual address to a physical address
1540 */
1541static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1542{
1543        pgd_t *pgd;
1544        pud_t *pud;
1545        pmd_t *pmd;
1546        pte_t *ptep, pte;
1547
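            /*
             * On this 32-bit ARM configuration the pud and pmd levels are
             * folded into the pgd, so the pud/pmd steps below are effectively
             * no-ops; they are kept so the walk follows the generic
             * page-table API.
             */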
1548        pgd = pgd_offset(mm, address);
1549        if (pgd_none(*pgd) || pgd_bad(*pgd))
1550                return 0;
1551
1552        pud = pud_offset(pgd, address);
1553        if (pud_none(*pud) || pud_bad(*pud))
1554                return 0;
1555
1556        pmd = pmd_offset(pud, address);
1557        if (pmd_none(*pmd) || pmd_bad(*pmd))
1558                return 0;
1559
1560        ptep = pte_offset_map(pmd, address);
1561        if (ptep) {
1562                pte = *ptep;
1563                if (pte_present(pte))
1564                        return pte_val(pte) & PAGE_MASK;
1565        }
1566
1567        return 0;
1568}
1569
1570/*
1571 *  ======== pte_update ========
1572 *      This function calculates the optimum page-aligned addresses and sizes.
1573 *      The caller must pass page-aligned values.
1574 */
1575static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1576                             u32 va, u32 size,
1577                             struct hw_mmu_map_attrs_t *map_attrs)
1578{
1579        u32 i;
1580        u32 all_bits;
1581        u32 pa_curr = pa;
1582        u32 va_curr = va;
1583        u32 num_bytes = size;
1584        struct bridge_dev_context *dev_context = dev_ctxt;
1585        int status = 0;
1586        u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1587                HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1588        };
1589
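            /*
             * Example (illustrative): for pa = 0x87210000, va = 0x20010000
             * and size = 0x11000, both addresses are 64 KB aligned, so the
             * first pass programs one 64 KB entry; the remaining 0x1000
             * bytes are only 4 KB aligned and get a single 4 KB entry.
             */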
1590        while (num_bytes && !status) {
1591                /* Find the largest page size with which both PA and VA
1592                 * are aligned */
1593                all_bits = pa_curr | va_curr;
1594
1595                for (i = 0; i < 4; i++) {
1596                        if ((num_bytes >= page_size[i]) && ((all_bits &
1597                                                             (page_size[i] -
1598                                                              1)) == 0)) {
1599                                status =
1600                                    pte_set(dev_context->pt_attrs, pa_curr,
1601                                            va_curr, page_size[i], map_attrs);
1602                                pa_curr += page_size[i];
1603                                va_curr += page_size[i];
1604                                num_bytes -= page_size[i];
1605                                /* Don't try smaller sizes. Hopefully we have
1606                                 * reached an address aligned to a bigger page
1607                                 * size */
1608                                break;
1609                        }
1610                }
1611        }
1612
1613        return status;
1614}
1615
1616/*
1617 *  ======== pte_set ========
1618 *      This function calculates PTE address (MPU virtual) to be updated
1619 *      It also manages the L2 page tables
1620 */
1621static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1622                          u32 size, struct hw_mmu_map_attrs_t *attrs)
1623{
1624        u32 i;
1625        u32 pte_val;
1626        u32 pte_addr_l1;
1627        u32 pte_size;
1628        /* Base address of the PT that will be updated */
1629        u32 pg_tbl_va;
1630        u32 l1_base_va;
1631        /* The compiler warns that the next three variables might be used
1632         * uninitialized in this function. That does not appear to be the
1633         * case, but initialize them anyway to silence the warning. */
1634        u32 l2_base_va = 0;
1635        u32 l2_base_pa = 0;
1636        u32 l2_page_num = 0;
1637        int status = 0;
1638
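            /*
             * 4 KB and 64 KB mappings need an L2 (coarse) table: if the L1
             * entry covering this VA is still empty, a free 1 KB L2 table is
             * taken from the pre-allocated pool and the L1 entry is pointed
             * at it. 1 MB and 16 MB mappings are written directly into the
             * L1 table (pg_tbl_va stays l1_base_va).
             */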
1639        l1_base_va = pt->l1_base_va;
1640        pg_tbl_va = l1_base_va;
1641        if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1642                /* Find whether the L1 PTE points to a valid L2 PT */
1643                pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1644                if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1645                        pte_val = *(u32 *) pte_addr_l1;
1646                        pte_size = hw_mmu_pte_size_l1(pte_val);
1647                } else {
1648                        return -EPERM;
1649                }
1650                spin_lock(&pt->pg_lock);
1651                if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1652                        /* Get the L2 PA from the L1 PTE, and find
1653                         * corresponding L2 VA */
1654                        l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1655                        l2_base_va =
1656                            l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1657                        l2_page_num =
1658                            (l2_base_pa -
1659                             pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1660                } else if (pte_size == 0) {
1661                        /* L1 PTE is invalid. Allocate a L2 PT and
1662                         * point the L1 PTE to it */
1663                        /* Find a free L2 PT. */
1664                        for (i = 0; (i < pt->l2_num_pages) &&
1665                             (pt->pg_info[i].num_entries != 0); i++)
1666                                ;
1667                        if (i < pt->l2_num_pages) {
1668                                l2_page_num = i;
1669                                l2_base_pa = pt->l2_base_pa + (l2_page_num *
1670                                                HW_MMU_COARSE_PAGE_SIZE);
1671                                l2_base_va = pt->l2_base_va + (l2_page_num *
1672                                                HW_MMU_COARSE_PAGE_SIZE);
1673                                /* Endianness attributes are ignored for
1674                                 * HW_MMU_COARSE_PAGE_SIZE */
1675                                status =
1676                                    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1677                                                   HW_MMU_COARSE_PAGE_SIZE,
1678                                                   attrs);
1679                        } else {
1680                                status = -ENOMEM;
1681                        }
1682                } else {
1683                        /* Found valid L1 PTE of another size.
1684                         * Should not overwrite it. */
1685                        status = -EPERM;
1686                }
1687                if (!status) {
1688                        pg_tbl_va = l2_base_va;
1689                        if (size == HW_PAGE_SIZE64KB)
1690                                pt->pg_info[l2_page_num].num_entries += 16;
1691                        else
1692                                pt->pg_info[l2_page_num].num_entries++;
1693                        dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1694                                "%x, num_entries %x\n", l2_base_va,
1695                                l2_base_pa, l2_page_num,
1696                                pt->pg_info[l2_page_num].num_entries);
1697                }
1698                spin_unlock(&pt->pg_lock);
1699        }
1700        if (!status) {
1701                dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1702                        pg_tbl_va, pa, va, size);
1703                dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1704                        "mixed_size %x\n", attrs->endianism,
1705                        attrs->element_size, attrs->mixed_size);
1706                status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1707        }
1708
1709        return status;
1710}
1711
1712/* Memory map kernel VA -- memory allocated with vmalloc */
1713static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1714                                  u32 ul_mpu_addr, u32 virt_addr,
1715                                  u32 ul_num_bytes,
1716                                  struct hw_mmu_map_attrs_t *hw_attrs)
1717{
1718        int status = 0;
1719        struct page *page[1];
1720        u32 i;
1721        u32 pa_curr;
1722        u32 pa_next;
1723        u32 va_curr;
1724        u32 size_curr;
1725        u32 num_pages;
1726        u32 pa;
1727        u32 num_of4k_pages;
1728        u32 temp = 0;
1729
1730        /*
1731         * Do kernel va to pa translation.
1732         * Combine physically contiguous regions to reduce TLB entries.
1733         * Pass the translated pa to pte_update.
1734         */
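            /*
             * vmalloc memory is virtually contiguous but generally not
             * physically contiguous, so most iterations below map a single
             * PAGE_SIZE chunk; consecutive pages that do happen to be
             * physically adjacent are coalesced into one larger pte_update()
             * call.
             */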
1735        num_pages = ul_num_bytes / PAGE_SIZE;   /* PAGE_SIZE = OS page size */
1736        i = 0;
1737        va_curr = ul_mpu_addr;
1738        page[0] = vmalloc_to_page((void *)va_curr);
1739        pa_next = page_to_phys(page[0]);
1740        while (!status && (i < num_pages)) {
1741                /*
1742                 * Reuse pa_next from the previous iteration to avoid
1743                 * an extra va2pa call
1744                 */
1745                pa_curr = pa_next;
1746                size_curr = PAGE_SIZE;
1747                /*
1748                 * If the next page is physically contiguous,
1749                 * map it with the current one by increasing
1750                 * the size of the region to be mapped
1751                 */
1752                while (++i < num_pages) {
1753                        page[0] =
1754                            vmalloc_to_page((void *)(va_curr + size_curr));
1755                        pa_next = page_to_phys(page[0]);
1756
1757                        if (pa_next == (pa_curr + size_curr))
1758                                size_curr += PAGE_SIZE;
1759                        else
1760                                break;
1761
1762                }
1763                if (pa_next == 0) {
1764                        status = -ENOMEM;
1765                        break;
1766                }
1767                pa = pa_curr;
1768                num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1769                for (temp = 0; temp < num_of4k_pages; temp++) {
1770                        get_page(PHYS_TO_PAGE(pa));
1771                        pa += HW_PAGE_SIZE4KB;
1772                }
1773                status = pte_update(dev_context, pa_curr, virt_addr +
1774                                    (va_curr - ul_mpu_addr), size_curr,
1775                                    hw_attrs);
1776                va_curr += size_curr;
1777        }
1778        /*
1779         * In any case, flush the TLB.
1780         * This is done here rather than in pte_update to avoid flushing
1781         * repeatedly while mapping non-contiguous physical regions of a
1782         * virtual region.
1783         */
1784        flush_all(dev_context);
1785        dev_dbg(bridge, "%s status %x\n", __func__, status);
1786        return status;
1787}
1788
1789/*
1790 *  ======== wait_for_start ========
1791 *      Wait for the signal from the DSP that it has started, or time out.
1792 */
1793bool wait_for_start(struct bridge_dev_context *dev_context,
1794                        void __iomem *sync_addr)
1795{
1796        u16 timeout = TIHELEN_ACKTIMEOUT;
1797
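            /*
             * The word at sync_addr is expected to be cleared by the DSP
             * once it has started; poll it every 10 us, giving up after
             * TIHELEN_ACKTIMEOUT (10000) iterations, i.e. roughly 100 ms.
             */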
1798        /*  Wait for response from board */
1799        while (__raw_readw(sync_addr) && --timeout)
1800                udelay(10);
1801
1802        /*  If timed out: return false */
1803        if (!timeout) {
1804                pr_err("%s: Timed out waiting for DSP to start\n", __func__);
1805                return false;
1806        }
1807        return true;
1808}
1809