linux/drivers/staging/tidspbridge/rmgr/drv.c
/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
        struct list_head dev_list;
        struct list_head dev_node_string;
};
/*
 *  This is the Device Extension. Named with the prefix
 *  DRV_ since it lives in this module.
 */
struct drv_ext {
        struct list_head link;
        char sz_string[MAXREGPATHLENGTH];
};

/*  ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
        u32 phys_mem_base;
        u32 phys_mem_size;
        u32 virt_mem_base;
        u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/*  ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/*
 * Allocate and add a node resource element.
 * This function is called from node_allocate().
 */
int drv_insert_node_res_element(void *hnode, void *node_resource,
                                       void *process_ctxt)
{
        struct node_res_object **node_res_obj =
            (struct node_res_object **)node_resource;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int retval;

        *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
        if (!*node_res_obj)
                return -ENOMEM;

        (*node_res_obj)->node = hnode;
        retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
        if (retval >= 0) {
                (*node_res_obj)->id = retval;
                return 0;
        }

        kfree(*node_res_obj);

        if (retval == -ENOSPC) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                return -EFAULT;
        } else {
                pr_err("%s: OUT OF MEMORY\n", __func__);
                return -ENOMEM;
        }
}
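
/*
 * Illustrative sketch (not part of the driver): idr_alloc() returns the
 * newly allocated id on success and a negative errno on failure, which
 * is the convention the function above relies on. A hypothetical caller
 * on the node-allocation path might use it like this.
 */
#if 0
static int example_track_node(struct process_context *ctxt, void *hnode)
{
        void *node_res = NULL;  /* receives the new node_res_object */
        int status;

        status = drv_insert_node_res_element(hnode, &node_res, ctxt);
        if (status)             /* -ENOMEM or -EFAULT from the IDR path */
                return status;

        /* mark the node allocated so process cleanup will delete it */
        drv_proc_node_update_status(node_res, true);
        return 0;
}
#endif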

/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
        struct process_context *ctxt = data;
        int status;
        struct node_res_object *node_res_obj = p;
        u32 node_state;

        if (node_res_obj->node_allocated) {
                node_state = node_get_state(node_res_obj->node);
                if (node_state <= NODE_DELETING) {
                        if ((node_state == NODE_RUNNING) ||
                            (node_state == NODE_PAUSED) ||
                            (node_state == NODE_TERMINATING))
                                node_terminate
                                    (node_res_obj->node, &status);

                        node_delete(node_res_obj, ctxt);
                }
        }

        return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        struct dmm_map_object *temp_map, *map_obj;
        struct dmm_rsv_object *temp_rsv, *rsv_obj;

        /* Free DMM mapped memory resources */
        list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
                status = proc_un_map(ctxt->processor,
                                     (void *)map_obj->dsp_addr, ctxt);
                if (status)
                        pr_err("%s: proc_un_map failed! status = 0x%x\n",
                               __func__, status);
        }

        /* Free DMM reserved memory resources */
        list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
                status = proc_un_reserve_memory(ctxt->processor, (void *)
                                                rsv_obj->dsp_reserved_addr,
                                                ctxt);
                if (status)
                        pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
                               __func__, status);
        }
        return status;
}

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        node_res_obj->heap_allocated = status;
}

/*
 * Release all Node resources and its context.
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
        idr_destroy(ctxt->node_id);

        return 0;
}
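
/*
 * Illustrative sketch (not part of the driver): idr_for_each() calls the
 * callback as fn(id, pointer, data) for every allocated id and aborts
 * the walk as soon as the callback returns non-zero, which is why
 * drv_proc_free_node_res() always returns 0. A hypothetical counting
 * walk would look like this.
 */
#if 0
static int example_count_one(int id, void *p, void *data)
{
        int *count = data;      /* p would be the node_res_object */

        (*count)++;
        return 0;               /* non-zero would stop the iteration */
}

static int example_count_nodes(struct process_context *ctxt)
{
        int count = 0;

        idr_for_each(ctxt->node_id, example_count_one, &count);
        return count;
}
#endif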

/*
 * Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
                                            void *strm_res, void *process_ctxt)
{
        struct strm_res_object **pstrm_res =
            (struct strm_res_object **)strm_res;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int retval;

        *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
        if (*pstrm_res == NULL)
                return -ENOMEM;

        (*pstrm_res)->stream = stream_obj;
        retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
        if (retval >= 0) {
                (*pstrm_res)->id = retval;
                return 0;
        }

        kfree(*pstrm_res);

        if (retval == -ENOSPC) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                return -EPERM;
        } else {
                pr_err("%s: OUT OF MEMORY\n", __func__);
                return -ENOMEM;
        }
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;
        struct strm_res_object *strm_res = p;
        struct stream_info strm_info;
        struct dsp_streaminfo user;
        u8 **ap_buffer = NULL;
        u8 *buf_ptr;
        u32 ul_bytes;
        u32 dw_arg;
        s32 ul_buf_size;

        if (strm_res->num_bufs) {
                ap_buffer = kmalloc_array(strm_res->num_bufs,
                                          sizeof(u8 *), GFP_KERNEL);
                if (ap_buffer) {
                        strm_free_buffer(strm_res, ap_buffer,
                                         strm_res->num_bufs, ctxt);
                        kfree(ap_buffer);
                }
        }
        strm_info.user_strm = &user;
        user.number_bufs_in_stream = 0;
        strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
        while (user.number_bufs_in_stream--)
                strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
                             (u32 *) &ul_buf_size, &dw_arg);
        strm_close(strm_res, ctxt);
        return 0;
}

/*
 * Release all Stream resources and its context.
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
        idr_destroy(ctxt->stream_id);

        return 0;
}

/* Updating the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
        int status = 0;
        struct strm_res_object **strm_res =
            (struct strm_res_object **)strm_resources;

        (*strm_res)->num_bufs = num_bufs;
        return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = NULL;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
        if (pdrv_object) {
                /* Create and Initialize List of device objects */
                INIT_LIST_HEAD(&pdrv_object->dev_list);
                INIT_LIST_HEAD(&pdrv_object->dev_node_string);
        } else {
                status = -ENOMEM;
        }
        /* Store the DRV Object in the driver data */
        if (!status) {
                if (drv_datap) {
                        drv_datap->drv_object = (void *)pdrv_object;
                } else {
                        status = -EPERM;
                        pr_err("%s: Failed to store DRV object\n", __func__);
                }
        }

        if (!status) {
                *drv_obj = pdrv_object;
        } else {
                /* Free the DRV Object */
                kfree(pdrv_object);
        }

        return status;
}

/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        kfree(pdrv_object);
        /* Clear the DRV Object in the driver data */
        if (drv_datap) {
                drv_datap->drv_object = NULL;
        } else {
                status = -EPERM;
                pr_err("%s: Failed to clear DRV object\n", __func__);
        }

        return status;
}

/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
                              struct dev_object **device_obj)
{
        int status = 0;
        struct dev_object *dev_obj;
        u32 i;

        dev_obj = (struct dev_object *)drv_get_first_dev_object();
        for (i = 0; i < index; i++) {
                dev_obj =
                    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
        }
        if (dev_obj) {
                *device_obj = (struct dev_object *)dev_obj;
        } else {
                *device_obj = NULL;
                status = -EPERM;
        }

        return status;
}

/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
        u32 dw_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_list))
                        dw_dev_object = (u32) pdrv_obj->dev_list.next;
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_object;
}

/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) pdrv_obj->dev_node_string.next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV, after having previously
 *      called drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object() calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
        u32 dw_next_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);
        struct list_head *curr;

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_list)) {
                        curr = (struct list_head *)hdev_obj;
                        if (list_is_last(curr, &pdrv_obj->dev_list))
                                return 0;
                        dw_next_dev_object = (u32) curr->next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_next_dev_object;
}

/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);
        struct list_head *curr;

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_node_string)) {
                        curr = (struct list_head *)dev_extension;
                        if (list_is_last(curr, &pdrv_obj->dev_node_string))
                                return 0;
                        dw_dev_extension = (u32) curr->next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the list of Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
                                 struct dev_object *hdev_obj)
{
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

        list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

        return 0;
}
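
/*
 * Note on the cast in drv_insert_dev_object(): a struct dev_object
 * pointer is passed straight to list_add_tail(), which is only safe if
 * its list link is the first member so the two pointers coincide. A
 * minimal sketch of that layout assumption (example_dev_object is
 * hypothetical; the real struct dev_object is defined elsewhere in the
 * driver):
 */
#if 0
struct example_dev_object {
        struct list_head link;  /* must stay first for the cast to hold */
        u32 dev_type;
        /* ... */
};

/*
 * With this layout, (struct list_head *)hdev_obj == &hdev_obj->link,
 * so the object pointer can stand in for its own list node.
 */
#endif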

/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
                                 struct dev_object *hdev_obj)
{
        int status = -EPERM;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct list_head *cur_elem;

        /* Search the list for hdev_obj: */
        list_for_each(cur_elem, &pdrv_object->dev_list) {
                /* If found, remove it. */
                if ((struct dev_object *)cur_elem == hdev_obj) {
                        list_del(cur_elem);
                        status = 0;
                        break;
                }
        }

        return status;
}

/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
        int status = 0;
        struct drv_object *pdrv_object;
        struct drv_ext *pszdev_node;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        /*
         *  Allocate memory to hold the string. It lives until it is
         *  freed in drv_release_resources(). Update the driver object
         *  list.
         */

        if (!drv_datap || !drv_datap->drv_object)
                status = -ENODATA;
        else
                pdrv_object = drv_datap->drv_object;

        if (!status) {
                pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
                if (pszdev_node) {
                        strncpy(pszdev_node->sz_string,
                                (char *)dw_context, MAXREGPATHLENGTH - 1);
                        pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
                        /* Update the Driver Object List */
                        *dev_node_strg = (u32) pszdev_node->sz_string;
                        list_add_tail(&pszdev_node->link,
                                      &pdrv_object->dev_node_string);
                } else {
                        status = -ENOMEM;
                        *dev_node_strg = 0;
                }
        } else {
                dev_dbg(bridge, "%s: Failed to get Driver Object from Registry\n",
                        __func__);
                *dev_node_strg = 0;
        }

        return status;
}

/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
        int status = 0;
        struct drv_ext *pszdev_node;

        /*
         *  Irrespective of the status, go ahead and clean up.
         *  The following may overwrite the status.
         */
        for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
             pszdev_node != NULL; pszdev_node = (struct drv_ext *)
             drv_get_next_dev_extension((u32) pszdev_node)) {
                if ((u32) pszdev_node == dw_context) {
                        /* Found it */
                        /* Delete from the Driver object list */
                        list_del(&pszdev_node->link);
                        kfree(pszdev_node);
                        break;
                }
        }
        return status;
}

/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
        struct cfg_hostres *host_res = res;

        /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
        host_res->num_mem_windows = 2;

        /* First window is for DSP internal memory */
        dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
        dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
        dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

        /* For 24xx, the base port does not map the memory for DSP
         * internal memory. TODO: do an ioremap here. */
        /* Second window is for DSP external memory shared with MPU */

        /* These are hard-coded values */
        host_res->birq_registers = 0;
        host_res->birq_attrib = 0;
        host_res->offset_for_monitor = 0;
        host_res->chnl_offset = 0;
        /* CHNL_MAXCHANNELS */
        host_res->num_chnls = CHNL_MAXCHANNELS;
        host_res->chnl_buf_size = 0x400;

        return 0;
}

/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
        int status = 0;
        struct cfg_hostres *host_res;
        u32 dw_buff_size;
        u32 dma_addr;
        u32 shm_size;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        dw_buff_size = sizeof(struct cfg_hostres);

        host_res = kzalloc(dw_buff_size, GFP_KERNEL);

        if (host_res != NULL) {
                request_bridge_resources(host_res);
                /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
                host_res->num_mem_windows = 4;

                host_res->mem_base[0] = 0;
                host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
                                                         OMAP_DSP_MEM1_SIZE);
                host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
                                                         OMAP_DSP_MEM2_SIZE);
                host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
                                                         OMAP_DSP_MEM3_SIZE);
                host_res->per_base = ioremap(OMAP_PER_CM_BASE,
                                                OMAP_PER_CM_SIZE);
                host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE,
                                                OMAP_PER_PRM_SIZE);
                host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE,
                                                        OMAP_CORE_PRM_SIZE);
                host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
                                                 OMAP_DMMU_SIZE);

                dev_dbg(bridge, "mem_base[0] 0x%x\n",
                        host_res->mem_base[0]);
                dev_dbg(bridge, "mem_base[1] 0x%x\n",
                        host_res->mem_base[1]);
                dev_dbg(bridge, "mem_base[2] 0x%x\n",
                        host_res->mem_base[2]);
                dev_dbg(bridge, "mem_base[3] 0x%x\n",
                        host_res->mem_base[3]);
                dev_dbg(bridge, "mem_base[4] 0x%x\n",
                        host_res->mem_base[4]);
                dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

                shm_size = drv_datap->shm_size;
                if (shm_size >= 0x10000) {
                        /* Allocate Physically contiguous,
                         * non-cacheable memory */
                        host_res->mem_base[1] =
                            (u32) mem_alloc_phys_mem(shm_size, 0x100000,
                                                     &dma_addr);
                        if (host_res->mem_base[1] == 0) {
                                status = -ENOMEM;
                                pr_err("shm reservation Failed\n");
                        } else {
                                host_res->mem_length[1] = shm_size;
                                host_res->mem_phys[1] = dma_addr;

                                dev_dbg(bridge, "%s: Bridge shm address 0x%x dma_addr %x size %x\n",
                                        __func__, host_res->mem_base[1],
                                        dma_addr, shm_size);
                        }
                }
                if (!status) {
                        /* These are hard-coded values */
                        host_res->birq_registers = 0;
                        host_res->birq_attrib = 0;
                        host_res->offset_for_monitor = 0;
                        host_res->chnl_offset = 0;
                        /* CHNL_MAXCHANNELS */
                        host_res->num_chnls = CHNL_MAXCHANNELS;
                        host_res->chnl_buf_size = 0x400;
                        dw_buff_size = sizeof(struct cfg_hostres);
                }
                *phost_resources = host_res;
        }
        /* End Mem alloc */
        return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
        u32 pool_virt_base;

        /* get the virtual address for the physical memory pool passed */
        pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

        if (!pool_virt_base) {
                pr_err("%s: external physical memory map failed\n", __func__);
                ext_phys_mem_pool_enabled = false;
        } else {
                ext_mem_pool.phys_mem_base = pool_phys_base;
                ext_mem_pool.phys_mem_size = pool_size;
                ext_mem_pool.virt_mem_base = pool_virt_base;
                ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
                ext_phys_mem_pool_enabled = true;
        }
}

void mem_ext_phys_pool_release(void)
{
        if (ext_phys_mem_pool_enabled) {
                iounmap((void *)(ext_mem_pool.virt_mem_base));
                ext_phys_mem_pool_enabled = false;
        }
}

/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *     Allocate physically contiguous, uncached memory from the external
 *     memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
        u32 new_alloc_ptr;
        u32 offset;
        u32 virt_addr;

        if (align == 0)
                align = 1;

        if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
                     - ext_mem_pool.next_phys_alloc_ptr)) {
                *phys_addr = 0;
                return NULL;
        } else {
                offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
                if (offset == 0)
                        new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
                else
                        new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
                            (align - offset);
                if ((new_alloc_ptr + bytes) <=
                    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
                        /* we can allocate */
                        *phys_addr = new_alloc_ptr;
                        ext_mem_pool.next_phys_alloc_ptr =
                            new_alloc_ptr + bytes;
                        virt_addr =
                            ext_mem_pool.virt_mem_base + (new_alloc_ptr -
                                                          ext_mem_pool.
                                                          phys_mem_base);
                        return (void *)virt_addr;
                } else {
                        *phys_addr = 0;
                        return NULL;
                }
        }
}
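
/*
 * Worked example of the alignment arithmetic above (assuming align is a
 * power of two, which the "& (align - 1)" mask relies on): with
 * align = 0x1000 and next_phys_alloc_ptr = 0x87001234, offset = 0x234,
 * so the allocation starts at new_alloc_ptr = 0x87001234 +
 * (0x1000 - 0x234) = 0x87002000, the next 4 KiB boundary.
 */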

/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
                                u32 *physical_address)
{
        void *va_mem = NULL;
        dma_addr_t pa_mem;

        if (byte_size > 0) {
                if (ext_phys_mem_pool_enabled) {
                        va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
                                                        (u32 *) &pa_mem);
                } else {
                        va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
                                                    GFP_KERNEL);
                }
                if (va_mem == NULL)
                        *physical_address = 0;
                else
                        *physical_address = pa_mem;
        }
        return va_mem;
}
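
/*
 * Illustrative sketch (not part of the driver): allocations from
 * mem_alloc_phys_mem() are paired with mem_free_phys_mem() below, which
 * is deliberately a no-op for pool-backed blocks because the whole pool
 * is unmapped at once in mem_ext_phys_pool_release().
 */
#if 0
static int example_shm_buffer(void)
{
        u32 pa;
        void *va = mem_alloc_phys_mem(0x1000, 0x1000, &pa);

        if (!va)
                return -ENOMEM;
        /* ... use va from the CPU and pa from the DSP/DMA side ... */
        mem_free_phys_mem(va, pa, 0x1000);
        return 0;
}
#endif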

/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
                       u32 byte_size)
{
        if (!ext_phys_mem_pool_enabled)
                dma_free_coherent(NULL, byte_size, virtual_address,
                                  physical_address);
}