linux/drivers/staging/tidspbridge/pmgr/cmm.c
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/string.h>       /* for memset() in cmm_calloc_buf() */

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
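/* First physical address past a node's block; add_to_free_list() uses this
 * to detect physically adjacent free blocks when coalescing. */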
#define NEXT_PA(pnode)   ((pnode)->dw_pa + (pnode)->ul_size)
  54
  55/* Other bus/platform translations */
  56#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
  57#define GPPPA2DSPPA(base, x, y)  ((x)-(y))
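/* Callers pass (dw_dsp_phys_addr_offset * c_factor) as the y argument, so
 * the same macros serve both conversion directions; c_factor selects the
 * direction (CMM_ADDTODSPPA vs. CMM_SUBFROMDSPPA). The base argument is
 * unused. */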

/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator. (not used)
 */
struct cmm_allocator {          /* sma */
        unsigned int shm_base;  /* Start of physical SM block */
        u32 ul_sm_size;         /* Size of SM block in bytes */
        unsigned int dw_vm_base;        /* Start of VM block. (Dev driver
                                         * context for 'sma') */
        u32 dw_dsp_phys_addr_offset;    /* DSP PA to GPP PA offset for this
                                         * SM space */
        s8 c_factor;            /* DSPPa to GPPPa Conversion Factor */
        unsigned int dw_dsp_base;       /* DSP virt base byte address */
        u32 ul_dsp_size;        /* DSP seg size in bytes */
        struct cmm_object *hcmm_mgr;    /* back ref to parent mgr */
        /* node list of available memory */
        struct lst_list *free_list_head;
        /* node list of memory in use */
        struct lst_list *in_use_list_head;
};

struct cmm_xlator {             /* Pa<->Va translator object */
        /* CMM object this translator is associated with */
        struct cmm_object *hcmm_mgr;
        /*
         *  Client process virtual base address that corresponds to phys SM
         *  base address for translator's ul_seg_id.
         *  Only 1 segment ID currently supported.
         */
        unsigned int dw_virt_base;      /* virtual base address */
        u32 ul_virt_size;       /* size of virt space in bytes */
        u32 ul_seg_id;          /* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
        /*
         * Cmm Lock is used to serialize access to the mem manager across
         * multiple threads.
         */
        struct mutex cmm_lock;  /* Lock to access cmm mgr */
        struct lst_list *node_free_list_head;   /* Free list of memory nodes */
        u32 ul_min_block_size;  /* Min SM block; default 16 bytes */
        u32 dw_page_size;       /* Memory Page size (1k/4k) */
        /* GPP SM segment ptrs */
        struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
        /* ul_min_block_size, min block size (bytes) allocated by cmm mgr */
        16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
        1               /* ul_seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
        /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
        1,
        0,                      /* dw_dsp_bufs */
        0,                      /* dw_dsp_buf_size */
        NULL,                   /* vm_base */
        0,                      /* dw_vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
        struct list_head link;  /* must be 1st element */
        u32 dw_pa;              /* Phys addr */
        u32 dw_va;              /* Virtual address in device process context */
        u32 ul_size;            /* SM block size in bytes */
        u32 client_proc;        /* Process that allocated this mem block */
};
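
/* Freed mnodes are parked on the manager's node_free_list_head by
 * delete_node() and recycled by get_node(); node descriptors are only
 * kfree'd at cmm_destroy() time (see the "heap thrashing friendly" note
 * at delete_node()). */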

/*  ----------------------------------- Globals */
static u32 refs;                /* module reference count */

/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero its contents, and return the physical
 *      address and an optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request, and return the remainder to the
 *      freelist if it is large enough. The kept block is placed on the
 *      inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
                     struct cmm_attrs *pattrs, void **pp_buf_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        void *buf_pa = NULL;
        struct cmm_mnode *pnode = NULL;
        struct cmm_mnode *new_node = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 delta_size;

        if (pattrs == NULL)
                pattrs = &cmm_dfltalctattrs;

        if (pp_buf_va != NULL)
                *pp_buf_va = NULL;

        /* SegId > 0 is SM. Folding the seg_id check into this condition
         * keeps the lock/unlock below balanced; previously the mutex could
         * be unlocked without ever having been locked when ul_seg_id was 0.
         */
        if (cmm_mgr_obj && (usize != 0) && (pattrs->ul_seg_id > 0)) {
                /* get the allocator object for this segment id */
                allocator = get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
                /* round size up to a multiple of ul_min_block_size */
                usize = ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 1))
                    + cmm_mgr_obj->ul_min_block_size;
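                /* e.g. with the default ul_min_block_size of 16 (a power
                 * of two): a 100-byte request becomes
                 * ((100 - 1) & ~15) + 16 = 96 + 16 = 112 bytes */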
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                pnode = get_free_block(allocator, usize);
                if (pnode) {
                        delta_size = (pnode->ul_size - usize);
                        if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
                                /* create a new block with the leftovers and
                                 * add to freelist */
                                new_node =
                                    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
                                             pnode->dw_va + usize,
                                             (u32) delta_size);
                                /* leftovers go free */
                                add_to_free_list(allocator, new_node);
                                /* adjust our node's size */
                                pnode->ul_size = usize;
                        }
                        /* Tag the node with the client process (TGID)
                         * requesting the allocation; we'll need to free a
                         * process's allocated SM if the client process
                         * goes away. */
                        pnode->client_proc = current->tgid;

                        /* put our node on InUse list */
                        lst_put_tail(allocator->in_use_list_head,
                                     (struct list_head *)pnode);
                        buf_pa = (void *)pnode->dw_pa;  /* physical address */
                        /* zero the block (equivalent to the original
                         * byte-at-a-time clear) */
                        memset((void *)pnode->dw_va, 0, usize);
                        if (pp_buf_va != NULL) {
                                /* Virtual address */
                                *pp_buf_va = (void *)pnode->dw_va;
                        }
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
        }
        return buf_pa;
}

/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
               struct dev_object *hdev_obj,
               const struct cmm_mgrattrs *mgr_attrts)
{
        struct cmm_object *cmm_obj = NULL;
        int status = 0;
        struct util_sysinfo sys_info;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(ph_cmm_mgr != NULL);

        *ph_cmm_mgr = NULL;
        /* create, zero, and tag a cmm mgr object */
        cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
        if (cmm_obj != NULL) {
                if (mgr_attrts == NULL)
                        mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */

                /* 4 bytes minimum */
                DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
                /* save away smallest block allocation for this cmm mgr */
                cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
                /* save away the system's memory page size */
                sys_info.dw_page_size = PAGE_SIZE;
                sys_info.dw_allocation_granularity = PAGE_SIZE;
                sys_info.dw_number_of_processors = 1;

                cmm_obj->dw_page_size = sys_info.dw_page_size;

                /* Note: the GPP SM segment table (pa_gppsm_seg_tab[]) is
                 * already zeroed by the kzalloc above */

                /* create node free list */
                cmm_obj->node_free_list_head =
                                kzalloc(sizeof(struct lst_list), GFP_KERNEL);
                if (cmm_obj->node_free_list_head == NULL) {
                        status = -ENOMEM;
                        cmm_destroy(cmm_obj, true);
                } else {
                        INIT_LIST_HEAD(&cmm_obj->node_free_list_head->head);
                        mutex_init(&cmm_obj->cmm_lock);
                        *ph_cmm_mgr = cmm_obj;
                }
        } else {
                status = -ENOMEM;
        }
        return status;
}

/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_info temp_info;
        int status = 0;
        s32 slot_seg;
        struct cmm_mnode *pnode;

        DBC_REQUIRE(refs > 0);
        if (!hcmm_mgr) {
                status = -EFAULT;
                return status;
        }
        /* If not force, fail if outstanding allocations exist. Do this
         * check before taking cmm_lock: cmm_get_info() acquires the same
         * (non-recursive) mutex itself, so calling it with the lock held
         * would self-deadlock. */
        if (!force) {
                /* Check for outstanding memory allocations */
                status = cmm_get_info(hcmm_mgr, &temp_info);
                if (!status && temp_info.ul_total_in_use_cnt > 0) {
                        /* outstanding allocations */
                        status = -EPERM;
                }
        }
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* UnRegister SM allocator */
                for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                        if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
                                un_register_gppsm_seg
                                    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
                                /* Set slot to NULL for future reuse */
                                cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
                        }
                }
                /* Tear down the node freelist only when really destroying;
                 * on failure the manager object must stay usable. */
                if (cmm_mgr_obj->node_free_list_head != NULL) {
                        /* Free the free nodes */
                        while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
                                pnode = (struct cmm_mnode *)lst_get_head(
                                        cmm_mgr_obj->node_free_list_head);
                                kfree(pnode);
                        }
                        /* delete NodeFreeList list */
                        kfree(cmm_mgr_obj->node_free_list_head);
                }
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* delete CS & cmm mgr object */
                mutex_destroy(&cmm_mgr_obj->cmm_lock);
                kfree(cmm_mgr_obj);
        }
        return status;
}

/*
 *  ======== cmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void cmm_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;
}

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = -EFAULT;
        struct cmm_mnode *mnode_obj = NULL;
        struct cmm_allocator *allocator = NULL;
        struct cmm_attrs *pattrs;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(buf_pa != NULL);

        if (ul_seg_id == 0) {
                pattrs = &cmm_dfltalctattrs;
                ul_seg_id = pattrs->ul_seg_id;
        }
        if (!hcmm_mgr || !(ul_seg_id > 0)) {
                status = -EFAULT;
                return status;
        }
        /* get the allocator for this segment id */
        allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
        if (allocator != NULL) {
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                mnode_obj =
                    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
                while (mnode_obj) {
                        if ((u32) buf_pa == mnode_obj->dw_pa) {
                                /* Found it */
                                lst_remove_elem(allocator->in_use_list_head,
                                                (struct list_head *)mnode_obj);
                                /* back to freelist */
                                add_to_free_list(allocator, mnode_obj);
                                status = 0;     /* all right! */
                                break;
                        }
                        /* next node. */
                        mnode_obj = (struct cmm_mnode *)
                            lst_next(allocator->in_use_list_head,
                                     (struct list_head *)mnode_obj);
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
        }
        return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
        int status = 0;
        struct dev_object *hdev_obj;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(ph_cmm_mgr != NULL);
        if (hprocessor != NULL)
                status = proc_get_dev_object(hprocessor, &hdev_obj);
        else
                hdev_obj = dev_get_first();     /* default */

        if (!status)
                status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

        return status;
}

/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
                 struct cmm_info *cmm_info_obj)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        u32 ul_seg;
        int status = 0;
        struct cmm_allocator *altr;
        struct cmm_mnode *mnode_obj = NULL;

        DBC_REQUIRE(cmm_info_obj != NULL);

        if (!hcmm_mgr) {
                status = -EFAULT;
                return status;
        }
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        cmm_info_obj->ul_num_gppsm_segs = 0;    /* # of SM segments */
        /* Total # of outstanding alloc */
        cmm_info_obj->ul_total_in_use_cnt = 0;
        /* min block size */
        cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
        /* check SM memory segments */
        for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
                /* get the allocator object for this segment id */
                altr = get_allocator(cmm_mgr_obj, ul_seg);
                if (altr != NULL) {
                        cmm_info_obj->ul_num_gppsm_segs++;
                        cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
                            altr->shm_base - altr->ul_dsp_size;
                        cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
                            altr->ul_dsp_size + altr->ul_sm_size;
                        cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
                            altr->shm_base;
                        cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
                            altr->ul_sm_size;
                        cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
                            altr->dw_dsp_base;
                        cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
                            altr->ul_dsp_size;
                        cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
                            altr->dw_vm_base - altr->ul_dsp_size;
                        cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
                        mnode_obj = (struct cmm_mnode *)
                            lst_first(altr->in_use_list_head);
                        /* Count inUse blocks */
                        while (mnode_obj) {
                                cmm_info_obj->ul_total_in_use_cnt++;
                                cmm_info_obj->seg_info[ul_seg -
                                                       1].ul_in_use_cnt++;
                                /* next node. */
                                mnode_obj = (struct cmm_mnode *)
                                    lst_next(altr->in_use_list_head,
                                             (struct list_head *)mnode_obj);
                        }
                }
        }                       /* end for */
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        return status;
}

/*
 *  ======== cmm_init ========
 *  Purpose:
 *      Initializes private state of CMM module.
 */
bool cmm_init(void)
{
        bool ret = true;

        DBC_REQUIRE(refs >= 0);
        if (ret)
                refs++;

        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

        return ret;
}

/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
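 *
 *      Layout assumed throughout this module (see cmm_get_info() and
 *      cmm_xlator_translate()): a DSP-side region of ul_dsp_size bytes
 *      sits immediately below dw_gpp_base_pa, so the whole segment spans
 *      [dw_gpp_base_pa - ul_dsp_size, dw_gpp_base_pa + ul_size).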
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
                           u32 dw_gpp_base_pa, u32 ul_size,
                           u32 dsp_addr_offset, s8 c_factor,
                           u32 dw_dsp_base, u32 ul_dsp_size,
                           u32 *sgmt_id, u32 gpp_base_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_allocator *psma = NULL;
        int status = 0;
        struct cmm_mnode *new_node;
        s32 slot_seg;

        DBC_REQUIRE(ul_size > 0);
        DBC_REQUIRE(sgmt_id != NULL);
        DBC_REQUIRE(dw_gpp_base_pa != 0);
        DBC_REQUIRE(gpp_base_va != 0);
        DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
                    (c_factor >= CMM_SUBFROMDSPPA));
        dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
                "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
                dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
                ul_dsp_size, gpp_base_va);
        if (!hcmm_mgr) {
                status = -EFAULT;
                return status;
        }
        /* make sure we have room for another allocator */
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        slot_seg = get_slot(cmm_mgr_obj);
        if (slot_seg < 0) {
                /* no free slot number */
                status = -EPERM;
                goto func_end;
        }
        /* Check if input ul_size is big enough to alloc at least one block */
        if (ul_size < cmm_mgr_obj->ul_min_block_size) {
                status = -EINVAL;
                goto func_end;
        }

        /* create, zero, and tag an SM allocator object */
        psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
        if (psma != NULL) {
                psma->hcmm_mgr = hcmm_mgr;      /* ref to parent */
                psma->shm_base = dw_gpp_base_pa;        /* SM Base phys */
                psma->ul_sm_size = ul_size;     /* SM segment size in bytes */
                psma->dw_vm_base = gpp_base_va;
                psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
                psma->c_factor = c_factor;
                psma->dw_dsp_base = dw_dsp_base;
                psma->ul_dsp_size = ul_dsp_size;
                if (psma->dw_vm_base == 0) {
                        status = -EPERM;
                        goto func_end;
                }
                /* return the actual segment identifier */
                *sgmt_id = (u32) slot_seg + 1;
                /* create memory free list */
                psma->free_list_head = kzalloc(sizeof(struct lst_list),
                                               GFP_KERNEL);
                if (psma->free_list_head == NULL) {
                        status = -ENOMEM;
                        goto func_end;
                }
                INIT_LIST_HEAD(&psma->free_list_head->head);

                /* create memory in-use list */
                psma->in_use_list_head = kzalloc(sizeof(struct lst_list),
                                                 GFP_KERNEL);
                if (psma->in_use_list_head == NULL) {
                        status = -ENOMEM;
                        goto func_end;
                }
                INIT_LIST_HEAD(&psma->in_use_list_head->head);

                /* Get a mem node for this hunk-o-memory */
                new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
                                    psma->dw_vm_base, ul_size);
                /* Place node on the SM allocator's free list */
                if (new_node) {
                        lst_put_tail(psma->free_list_head,
                                     (struct list_head *)new_node);
                } else {
                        status = -ENOMEM;
                        goto func_end;
                }
        } else {
                status = -ENOMEM;
                goto func_end;
        }
        /* make entry */
        cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
        if (status && psma) {
                /* Cleanup allocator */
                un_register_gppsm_seg(psma);
        }

        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        return status;
}

/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr, u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = 0;
        struct cmm_allocator *psma;
        u32 ul_id = ul_seg_id;

        DBC_REQUIRE(ul_seg_id > 0);
        if (hcmm_mgr) {
                if (ul_seg_id == CMM_ALLSEGMENTS)
                        ul_id = 1;

                if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
                        while (ul_id <= CMM_MAXGPPSEGS) {
                                mutex_lock(&cmm_mgr_obj->cmm_lock);
                                /* slot = seg_id - 1 */
                                psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
                                if (psma != NULL) {
                                        un_register_gppsm_seg(psma);
                                        /* Set allocator ptr to NULL for
                                         * future reuse */
                                        cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
                                                                      1] = NULL;
                                } else if (ul_seg_id != CMM_ALLSEGMENTS) {
                                        status = -EPERM;
                                }
                                mutex_unlock(&cmm_mgr_obj->cmm_lock);
                                if (ul_seg_id != CMM_ALLSEGMENTS)
                                        break;

                                ul_id++;
                        }       /* end while */
                } else {
                        status = -EINVAL;
                }
        } else {
                status = -EFAULT;
        }
        return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling the cmm mgr table entry.
 *  Note:
 *      This routine is always called within the cmm lock critical section.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
        struct cmm_mnode *mnode_obj = NULL;
        struct cmm_mnode *next_node = NULL;

        DBC_REQUIRE(psma != NULL);
        if (psma->free_list_head != NULL) {
                /* free nodes on free list */
                mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
                while (mnode_obj) {
                        next_node =
                            (struct cmm_mnode *)lst_next(psma->free_list_head,
                                                         (struct list_head *)
                                                         mnode_obj);
                        lst_remove_elem(psma->free_list_head,
                                        (struct list_head *)mnode_obj);
                        kfree((void *)mnode_obj);
                        /* next node. */
                        mnode_obj = next_node;
                }
                kfree(psma->free_list_head);    /* delete freelist */
        }
        /* The in-use list may not have been allocated yet when we are
         * called from a cmm_register_gppsm_seg() error path, so check it
         * separately instead of assuming it exists alongside the freelist. */
        if (psma->in_use_list_head != NULL) {
                /* free nodes on InUse list */
                mnode_obj =
                    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
                while (mnode_obj) {
                        next_node =
                            (struct cmm_mnode *)lst_next(psma->in_use_list_head,
                                                         (struct list_head *)
                                                         mnode_obj);
                        lst_remove_elem(psma->in_use_list_head,
                                        (struct list_head *)mnode_obj);
                        kfree((void *)mnode_obj);
                        /* next node. */
                        mnode_obj = next_node;
                }
                kfree(psma->in_use_list_head);  /* delete InUse list */
        }
        if ((void *)psma->dw_vm_base != NULL)
                MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);

        /* Free allocator itself */
        kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      Return the first available slot number, or a negative value on
 *      failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
        s32 slot_seg = -1;      /* neg on failure */

        DBC_REQUIRE(cmm_mgr_obj != NULL);
        /* get first available slot in cmm mgr SMSegTab[] */
        for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
                        break;
        }
        if (slot_seg == CMM_MAXGPPSEGS)
                slot_seg = -1;  /* failed */

        return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size)
{
        struct cmm_mnode *pnode = NULL;

        DBC_REQUIRE(cmm_mgr_obj != NULL);
        DBC_REQUIRE(dw_pa != 0);
        DBC_REQUIRE(dw_va != 0);
        DBC_REQUIRE(ul_size != 0);
        /* Check cmm mgr's node freelist */
        if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
                pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
        } else {
                /* surely a valid element */
                pnode = (struct cmm_mnode *)
                    lst_get_head(cmm_mgr_obj->node_free_list_head);
        }
        if (pnode) {
                lst_init_elem((struct list_head *)pnode);       /* set self */
                pnode->dw_pa = dw_pa;   /* Physical addr of start of block */
                pnode->dw_va = dw_va;   /* Virtual addr of start of block */
                pnode->ul_size = ul_size;       /* Size of block */
        }
        return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
        DBC_REQUIRE(pnode != NULL);
        lst_init_elem((struct list_head *)pnode);       /* init .self ptr */
        lst_put_tail(cmm_mgr_obj->node_free_list_head,
                     (struct list_head *)pnode);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
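 *
 *      Since add_to_free_list() keeps the freelist sorted in increasing
 *      size order, this first-fit scan effectively returns the smallest
 *      block that is large enough (best fit).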
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize)
{
        if (allocator) {
                struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
                    lst_first(allocator->free_list_head);
                while (mnode_obj) {
                        if (usize <= (u32) mnode_obj->ul_size) {
                                lst_remove_elem(allocator->free_list_head,
                                                (struct list_head *)mnode_obj);
                                return mnode_obj;
                        }
                        /* next node. */
                        mnode_obj = (struct cmm_mnode *)
                            lst_next(allocator->free_list_head,
                                     (struct list_head *)mnode_obj);
                }
        }
        return NULL;
}

/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the node with any physically adjacent free blocks, then
 *      insert it into the freelist in ascending size order.
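 *
 *      Illustrative example: freeing a 0x100-byte block at PA 0x1100 when
 *      free blocks [0x1000..0x10ff] and [0x1200..0x12ff] already exist
 *      merges all three into a single node spanning [0x1000..0x12ff]
 *      before it is reinserted by size.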
 */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *pnode)
{
        struct cmm_mnode *node_prev = NULL;
        struct cmm_mnode *node_next = NULL;
        struct cmm_mnode *mnode_obj;
        u32 dw_this_pa;
        u32 dw_next_pa;

        DBC_REQUIRE(pnode != NULL);
        DBC_REQUIRE(allocator != NULL);
        dw_this_pa = pnode->dw_pa;
        dw_next_pa = NEXT_PA(pnode);
        mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
        while (mnode_obj) {
                if (dw_this_pa == NEXT_PA(mnode_obj)) {
                        /* found the block ahead of this one */
                        node_prev = mnode_obj;
                } else if (dw_next_pa == mnode_obj->dw_pa) {
                        node_next = mnode_obj;
                }
                if ((node_prev == NULL) || (node_next == NULL)) {
                        /* next node. */
                        mnode_obj = (struct cmm_mnode *)
                            lst_next(allocator->free_list_head,
                                     (struct list_head *)mnode_obj);
                } else {
                        /* found both neighbours */
                        break;
                }
        }                       /* while */
        if (node_prev != NULL) {
                /* combine with previous block */
                lst_remove_elem(allocator->free_list_head,
                                (struct list_head *)node_prev);
                /* grow node to hold both */
                pnode->ul_size += node_prev->ul_size;
                pnode->dw_pa = node_prev->dw_pa;
                pnode->dw_va = node_prev->dw_va;
                /* place node on mgr nodeFreeList */
                delete_node((struct cmm_object *)allocator->hcmm_mgr,
                            node_prev);
        }
        if (node_next != NULL) {
                /* combine with next block */
                lst_remove_elem(allocator->free_list_head,
                                (struct list_head *)node_next);
                /* grow the node */
                pnode->ul_size += node_next->ul_size;
                /* place node on mgr nodeFreeList */
                delete_node((struct cmm_object *)allocator->hcmm_mgr,
                            node_next);
        }
        /* Now add to the freelist in increasing size order */
        mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
        while (mnode_obj) {
                if (pnode->ul_size <= mnode_obj->ul_size)
                        break;

                /* next node. */
                mnode_obj =
                    (struct cmm_mnode *)lst_next(allocator->free_list_head,
                                                 (struct list_head *)mnode_obj);
        }
        /* if mnode_obj is NULL, add our pnode to the end of the freelist */
        if (mnode_obj == NULL) {
                lst_put_tail(allocator->free_list_head,
                             (struct list_head *)pnode);
        } else {
                /* insert our node before the current traversed node */
                lst_insert_before(allocator->free_list_head,
                                  (struct list_head *)pnode,
                                  (struct list_head *)mnode_obj);
        }
}

/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds:  1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id)
{
        DBC_REQUIRE(cmm_mgr_obj != NULL);
        DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));

        /* the slot for seg_id N is entry N - 1; NULL if not registered */
        return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  The CMM_Xlator[xxx] routines below are used by Node and Stream
 *  to perform SM address translation to the client process address space.
 *  A "translator" object is created by a node/stream for each SM seg used.
 */
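
/*
 *  Illustrative usage sketch for a node/stream client (error handling
 *  omitted; va_base, va_size and buf_size are placeholder values):
 *
 *      struct cmm_xlatorobject *xlator = NULL;
 *      void *buf_va = NULL;
 *      void *buf_pa;
 *
 *      cmm_xlator_create(&xlator, hcmm_mgr, NULL);
 *      cmm_xlator_info(xlator, &va_base, va_size, 1, true);
 *      buf_pa = cmm_xlator_alloc_buf(xlator, &buf_va, buf_size);
 *      ...
 *      cmm_xlator_free_buf(xlator, buf_va);
 */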

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
                      struct cmm_object *hcmm_mgr,
                      struct cmm_xlatorattrs *xlator_attrs)
{
        struct cmm_xlator *xlator_object = NULL;
        int status = 0;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(xlator != NULL);
        DBC_REQUIRE(hcmm_mgr != NULL);

        *xlator = NULL;
        if (xlator_attrs == NULL)
                xlator_attrs = &cmm_dfltxlatorattrs;    /* set defaults */

        xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
        if (xlator_object != NULL) {
                xlator_object->hcmm_mgr = hcmm_mgr;     /* ref back to CMM */
                /* SM seg_id */
                xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
        } else {
                status = -ENOMEM;
        }
        if (!status)
                *xlator = (struct cmm_xlatorobject *)xlator_object;

        return status;
}

/*
 *  ======== cmm_xlator_alloc_buf ========
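 *  Purpose:
 *      Allocate an SM buffer for this translator's segment, store the
 *      client-process virtual address of the buffer in *va_buf, and
 *      return the physical address (or NULL on failure).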
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
                           u32 pa_size)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        void *pbuf = NULL;
        void *tmp_va_buff;
        struct cmm_attrs attrs;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(xlator != NULL);
        DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
        DBC_REQUIRE(va_buf != NULL);
        DBC_REQUIRE(pa_size > 0);
        DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

        if (xlator_obj) {
                attrs.ul_seg_id = xlator_obj->ul_seg_id;
                __raw_writel(0, va_buf);
                /* Alloc SM */
                pbuf =
                    cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
                if (pbuf) {
                        /* convert to translator (node/strm) process virtual
                         * address */
                        tmp_va_buff = cmm_xlator_translate(xlator,
                                                           pbuf, CMM_PA2VA);
                        __raw_writel((u32)tmp_va_buff, va_buf);
                }
        }
        return pbuf;
}

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        int status = -EPERM;
        void *buf_pa = NULL;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(buf_va != NULL);
        DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

        if (xlator_obj) {
                /* convert Va to Pa so we can free it. */
                buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
                if (buf_pa) {
                        status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
                                              xlator_obj->ul_seg_id);
                        if (status) {
                                /* Uh oh, this shouldn't happen. Descriptor
                                 * gone! */
                                DBC_ASSERT(false);      /* CMM is leaking mem */
                        }
                }
        }
        return status;
}

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
                    u32 ul_size, u32 segm_id, bool set_info)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        int status = 0;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(paddr != NULL);
        DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

        if (xlator_obj) {
                if (set_info) {
                        /* set the translator's virtual address range */
                        xlator_obj->dw_virt_base = (u32) *paddr;
                        xlator_obj->ul_virt_size = ul_size;
                } else {        /* return virt base address */
                        *paddr = (u8 *) xlator_obj->dw_virt_base;
                }
        } else {
                status = -EFAULT;
        }
        return status;
}

/*
 *  ======== cmm_xlator_translate ========
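 *  Purpose:
 *      Translate paddr according to xtype. "Va" is the client process
 *      virtual range set via cmm_xlator_info(), "Pa" the GPP physical SM
 *      address, and "DSP Pa" additionally applies the registered DSP
 *      address offset. The Va<->Pa mapping is linear:
 *          gpp_va = dw_virt_base + (gpp_pa - (shm_base - ul_dsp_size))
 *      Returns NULL for an out-of-range CMM_PA2VA translation.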
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
                           enum cmm_xlatetype xtype)
{
        u32 dw_addr_xlate = 0;
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        struct cmm_object *cmm_mgr_obj = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 dw_offset = 0;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(paddr != NULL);
        DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

        if (!xlator_obj)
                goto func_end;

        cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
        /* get this translator's default SM allocator */
        DBC_ASSERT(xlator_obj->ul_seg_id > 0);
        allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
        if (!allocator)
                goto func_end;

        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
            (xtype == CMM_PA2VA)) {
                if (xtype == CMM_PA2VA) {
                        /* Gpp Va = Va Base + offset */
                        dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
                                                           allocator->
                                                           ul_dsp_size);
                        dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
                        /* Check if translated Va base is in range */
                        if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
                            (dw_addr_xlate >=
                             (xlator_obj->dw_virt_base +
                              xlator_obj->ul_virt_size))) {
                                dw_addr_xlate = 0;      /* bad address */
                        }
                } else {
                        /* Gpp Pa = Gpp Base + offset */
                        dw_offset =
                            (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
                        dw_addr_xlate =
                            allocator->shm_base - allocator->ul_dsp_size +
                            dw_offset;
                }
        } else {
                dw_addr_xlate = (u32) paddr;
        }
        /* Now convert the address to the proper target physical address
         * if needed */
        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
                /* Got Gpp Pa now, convert to DSP Pa */
                dw_addr_xlate =
                    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
                                dw_addr_xlate,
                                allocator->dw_dsp_phys_addr_offset *
                                allocator->c_factor);
        } else if (xtype == CMM_DSPPA2PA) {
                /* Got DSP Pa, convert to GPP Pa */
                dw_addr_xlate =
                    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
                                dw_addr_xlate,
                                allocator->dw_dsp_phys_addr_offset *
                                allocator->c_factor);
        }
func_end:
        return (void *)dw_addr_xlate;
}