linux/drivers/staging/tidspbridge/pmgr/cmm.c
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by an SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>        /* struct mutex */
#include <linux/sched.h>        /* current->tgid */
#include <linux/slab.h>         /* kzalloc/kfree */
#include <linux/io.h>           /* __raw_writel */

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)   (pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
#define GPPPA2DSPPA(base, x, y)  ((x)-(y))

/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator (not used).
 */
struct cmm_allocator {          /* sma */
        unsigned int shm_base;  /* Start of physical SM block */
        u32 sm_size;            /* Size of SM block in bytes */
        unsigned int vm_base;   /* Start of VM block. (Dev driver
                                 * context for 'sma') */
        u32 dsp_phys_addr_offset;       /* DSP PA to GPP PA offset for this
                                         * SM space */
        s8 c_factor;            /* DSP Pa to GPP Pa conversion factor */
        unsigned int dsp_base;  /* DSP virt base byte address */
        u32 dsp_size;           /* DSP seg size in bytes */
        struct cmm_object *cmm_mgr;     /* back ref to parent mgr */
        /* node list of available memory */
        struct list_head free_list;
        /* node list of memory in use */
        struct list_head in_use_list;
};

struct cmm_xlator {             /* Pa<->Va translator object */
        /* CMM object this translator is associated with */
        struct cmm_object *cmm_mgr;
        /*
         *  Client process virtual base address that corresponds to phys SM
         *  base address for translator's seg_id.
         *  Only 1 segment ID currently supported.
         */
        unsigned int virt_base; /* virtual base address */
        u32 virt_size;          /* size of virt space in bytes */
        u32 seg_id;             /* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
        /*
         * Cmm Lock is used to serialize access to the memory manager
         * across multiple threads.
         */
        struct mutex cmm_lock;  /* Lock to access cmm mgr */
        struct list_head node_free_list;        /* Free list of memory nodes */
        u32 min_block_size;     /* Min SM block; default 16 bytes */
        u32 page_size;          /* Memory Page size (1k/4k) */
        /* GPP SM segment ptrs */
        struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
        /* min_block_size: min block size (bytes) allocated by cmm mgr */
        16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
        1               /* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
        /* seg_id, does not have to match cmm_dfltalctattrs seg_id */
        1,
        0,                      /* dsp_bufs */
        0,                      /* dsp_buf_size */
        NULL,                   /* vm_base */
        0,                      /* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
        struct list_head link;  /* must be 1st element */
        u32 pa;                 /* Phys addr */
        u32 va;                 /* Virtual address in device process context */
        u32 size;               /* SM block size in bytes */
        u32 client_proc;        /* Process that allocated this mem block */
};

/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer, zero its contents, and return the physical
 *      address and an optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and return the remainder to the
 *      freelist if it is large enough. The kept block is placed on the
 *      in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
                     struct cmm_attrs *pattrs, void **pp_buf_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        void *buf_pa = NULL;
        struct cmm_mnode *pnode = NULL;
        struct cmm_mnode *new_node = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 delta_size;

        if (pattrs == NULL)
                pattrs = &cmm_dfltalctattrs;

        if (pp_buf_va != NULL)
                *pp_buf_va = NULL;

        /* SegId > 0 is SM; lock only on this path so lock/unlock balance */
        if (cmm_mgr_obj && (usize != 0) && (pattrs->seg_id > 0)) {
                /* get the allocator object for this segment id */
                allocator = get_allocator(cmm_mgr_obj, pattrs->seg_id);
                /* keep block size a multiple of min_block_size */
                usize = ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1))
                        + cmm_mgr_obj->min_block_size;
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                pnode = get_free_block(allocator, usize);
                if (pnode) {
                        delta_size = (pnode->size - usize);
                        if (delta_size >= cmm_mgr_obj->min_block_size) {
                                /* create a new block with the leftovers and
                                 * add to freelist */
                                new_node = get_node(cmm_mgr_obj,
                                                    pnode->pa + usize,
                                                    pnode->va + usize,
                                                    delta_size);
                                /* leftovers go free */
                                add_to_free_list(allocator, new_node);
                                /* adjust our node's size */
                                pnode->size = usize;
                        }
                        /* Tag node with the client process requesting the
                         * allocation. We'll need to free up a process's
                         * alloc'd SM if the client process goes away.
                         * The TGID is recorded instead of a process handle.
                         */
                        pnode->client_proc = current->tgid;

                        /* put our node on the in-use list */
                        list_add_tail(&pnode->link, &allocator->in_use_list);
                        buf_pa = (void *)pnode->pa;     /* physical address */
                        /* zero the buffer contents */
                        memset((void *)pnode->va, 0, usize);

                        if (pp_buf_va != NULL) {
                                /* Virtual address */
                                *pp_buf_va = (void *)pnode->va;
                        }
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
        }
        return buf_pa;
}
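
/*
 *  Usage sketch (illustrative only; assumes a manager handle obtained via
 *  cmm_get_handle() and at least one registered SM segment):
 *
 *      void *buf_va;
 *      void *buf_pa = cmm_calloc_buf(hcmm_mgr, 0x100, NULL, &buf_va);
 *
 *      if (buf_pa) {
 *              ... hand buf_pa to the DSP, touch the data via buf_va ...
 *              cmm_free_buf(hcmm_mgr, buf_pa, 0);
 *      }
 *
 *  A NULL attrs pointer selects cmm_dfltalctattrs (segment 1), and the
 *  requested size is rounded up to the manager's min_block_size.
 */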

/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
               struct dev_object *hdev_obj,
               const struct cmm_mgrattrs *mgr_attrts)
{
        struct cmm_object *cmm_obj = NULL;
        int status = 0;

        *ph_cmm_mgr = NULL;
        /* create, zero, and tag a cmm mgr object */
        cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
        if (!cmm_obj)
                return -ENOMEM;

        if (mgr_attrts == NULL)
                mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */

        /* save away the smallest block allocation for this cmm mgr */
        cmm_obj->min_block_size = mgr_attrts->min_block_size;
        cmm_obj->page_size = PAGE_SIZE;

        /* create node free list */
        INIT_LIST_HEAD(&cmm_obj->node_free_list);
        mutex_init(&cmm_obj->cmm_lock);
        *ph_cmm_mgr = cmm_obj;

        return status;
}

/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_info temp_info;
        int status = 0;
        s32 slot_seg;
        struct cmm_mnode *node, *tmp;

        if (!hcmm_mgr)
                return -EFAULT;

        /*
         * If not forced, fail when outstanding allocations exist.
         * cmm_get_info() takes cmm_lock itself, so it must be called
         * before the lock is acquired below.
         */
        if (!force) {
                /* Check for outstanding memory allocations */
                status = cmm_get_info(hcmm_mgr, &temp_info);
                if (!status && temp_info.total_in_use_cnt > 0) {
                        /* outstanding allocations */
                        status = -EPERM;
                }
        }
        mutex_lock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* UnRegister SM allocators */
                for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                        if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
                                un_register_gppsm_seg
                                    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
                                /* Set slot to NULL for future reuse */
                                cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
                        }
                }
        }
        list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
                                 link) {
                list_del(&node->link);
                kfree(node);
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        if (!status) {
                /* delete the lock and the cmm mgr object */
                mutex_destroy(&cmm_mgr_obj->cmm_lock);
                kfree(cmm_mgr_obj);
        }
        return status;
}
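
/*
 *  Lifecycle sketch (illustrative only; 'hdev_obj' stands for the device
 *  object the manager is created for):
 *
 *      struct cmm_object *cmm_mgr;
 *
 *      if (!cmm_create(&cmm_mgr, hdev_obj, NULL))
 *              ... register SM segments, allocate and free buffers ...
 *      cmm_destroy(cmm_mgr, false);
 *
 *  With force == false the destroy fails with -EPERM while any allocation
 *  is still outstanding; pass true to force teardown.
 */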

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = -EFAULT;
        struct cmm_mnode *curr, *tmp;
        struct cmm_allocator *allocator;
        struct cmm_attrs *pattrs;

        if (ul_seg_id == 0) {
                pattrs = &cmm_dfltalctattrs;
                ul_seg_id = pattrs->seg_id;
        }
        if (!hcmm_mgr || !(ul_seg_id > 0))
                return -EFAULT;

        allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
        if (!allocator)
                return status;

        mutex_lock(&cmm_mgr_obj->cmm_lock);
        list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
                if (curr->pa == (u32) buf_pa) {
                        list_del(&curr->link);
                        add_to_free_list(allocator, curr);
                        status = 0;
                        break;
                }
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);

        return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
        int status = 0;
        struct dev_object *hdev_obj;

        if (hprocessor != NULL)
                status = proc_get_dev_object(hprocessor, &hdev_obj);
        else
                hdev_obj = dev_get_first();     /* default */

        if (!status)
                status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

        return status;
}
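
/*
 *  Usage sketch (illustrative only; 'hprocessor' is a processor handle,
 *  e.g. from proc_attach(), or NULL to use the first device):
 *
 *      struct cmm_object *hcmm_mgr;
 *
 *      if (!cmm_get_handle(hprocessor, &hcmm_mgr))
 *              ... hcmm_mgr is valid; it is owned by the device object,
 *              so do not cmm_destroy() it from here ...
 */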

/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
                 struct cmm_info *cmm_info_obj)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        u32 ul_seg;
        int status = 0;
        struct cmm_allocator *altr;
        struct cmm_mnode *curr;

        if (!hcmm_mgr)
                return -EFAULT;

        mutex_lock(&cmm_mgr_obj->cmm_lock);
        cmm_info_obj->num_gppsm_segs = 0;       /* # of SM segments */
        /* Total # of outstanding allocations */
        cmm_info_obj->total_in_use_cnt = 0;
        /* min block size */
        cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
        /* check SM memory segments */
        for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
                /* get the allocator object for this segment id */
                altr = get_allocator(cmm_mgr_obj, ul_seg);
                if (!altr)
                        continue;
                cmm_info_obj->num_gppsm_segs++;
                cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
                        altr->shm_base - altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
                        altr->dsp_size + altr->sm_size;
                cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
                        altr->shm_base;
                cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
                        altr->sm_size;
                cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
                        altr->dsp_base;
                cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
                        altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
                        altr->vm_base - altr->dsp_size;
                cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

                list_for_each_entry(curr, &altr->in_use_list, link) {
                        cmm_info_obj->total_in_use_cnt++;
                        cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
                }
        }
        mutex_unlock(&cmm_mgr_obj->cmm_lock);
        return status;
}
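
/*
 *  Query sketch (illustrative only):
 *
 *      struct cmm_info info;
 *
 *      if (!cmm_get_info(hcmm_mgr, &info))
 *              pr_debug("cmm: %u seg(s), %u buffer(s) in use\n",
 *                       info.num_gppsm_segs, info.total_in_use_cnt);
 *
 *  seg_info[n] describes segment id n + 1; total_in_use_cnt counts the
 *  nodes on every allocator's in-use list.
 */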

/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
                           u32 dw_gpp_base_pa, u32 ul_size,
                           u32 dsp_addr_offset, s8 c_factor,
                           u32 dw_dsp_base, u32 ul_dsp_size,
                           u32 *sgmt_id, u32 gpp_base_va)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        struct cmm_allocator *psma = NULL;
        int status = 0;
        struct cmm_mnode *new_node;
        s32 slot_seg;

        dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
                        "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
                        __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
                        dw_dsp_base, ul_dsp_size, gpp_base_va);

        if (!hcmm_mgr)
                return -EFAULT;

        /* make sure we have room for another allocator */
        mutex_lock(&cmm_mgr_obj->cmm_lock);

        slot_seg = get_slot(cmm_mgr_obj);
        if (slot_seg < 0) {
                status = -EPERM;
                goto func_end;
        }

        /* Check if input ul_size is big enough to alloc at least one block */
        if (ul_size < cmm_mgr_obj->min_block_size) {
                status = -EINVAL;
                goto func_end;
        }

        /* create, zero, and tag an SM allocator object */
        psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
        if (!psma) {
                status = -ENOMEM;
                goto func_end;
        }

        /*
         * Initialize the lists right away: the error path below calls
         * un_register_gppsm_seg(), which walks both of them.
         */
        INIT_LIST_HEAD(&psma->free_list);
        INIT_LIST_HEAD(&psma->in_use_list);

        psma->cmm_mgr = hcmm_mgr;       /* ref to parent */
        psma->shm_base = dw_gpp_base_pa;        /* SM Base phys */
        psma->sm_size = ul_size;        /* SM segment size in bytes */
        psma->vm_base = gpp_base_va;
        psma->dsp_phys_addr_offset = dsp_addr_offset;
        psma->c_factor = c_factor;
        psma->dsp_base = dw_dsp_base;
        psma->dsp_size = ul_dsp_size;
        if (psma->vm_base == 0) {
                status = -EPERM;
                goto func_end;
        }
        /* return the actual segment identifier */
        *sgmt_id = (u32) slot_seg + 1;

        /* Get a mem node for this hunk-o-memory */
        new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
                            psma->vm_base, ul_size);
        /* Place node on the SM allocator's free list */
        if (new_node) {
                list_add_tail(&new_node->link, &psma->free_list);
        } else {
                status = -ENOMEM;
                goto func_end;
        }
        /* make entry */
        cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
        /* Cleanup allocator */
        if (status && psma)
                un_register_gppsm_seg(psma);
        mutex_unlock(&cmm_mgr_obj->cmm_lock);

        return status;
}
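
/*
 *  Registration sketch (illustrative only; the addresses and sizes are
 *  made-up placeholders, normally taken from the platform's SM layout):
 *
 *      u32 seg_id;
 *      int err;
 *
 *      err = cmm_register_gppsm_seg(hcmm_mgr,
 *                                   0x87000000,    GPP-side phys base
 *                                   0x100000,      1 MB of SM
 *                                   0x0, 1,        DSP PA offset, c_factor
 *                                   0x11000000,    DSP virt base
 *                                   0x0,           DSP seg size
 *                                   &seg_id,
 *                                   gpp_base_va);  must be a non-zero VA
 *
 *  On success seg_id is slot + 1, i.e. the first segment gets id 1.
 */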

/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
                              u32 ul_seg_id)
{
        struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
        int status = 0;
        struct cmm_allocator *psma;
        u32 ul_id = ul_seg_id;

        if (!hcmm_mgr)
                return -EFAULT;

        if (ul_seg_id == CMM_ALLSEGMENTS)
                ul_id = 1;

        if ((ul_id == 0) || (ul_id > CMM_MAXGPPSEGS))
                return -EINVAL;

        /*
         * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems
         * ul_seg_id is not needed here; it must always be 1.
         */
        while (ul_id <= CMM_MAXGPPSEGS) {
                mutex_lock(&cmm_mgr_obj->cmm_lock);
                /* slot = seg_id - 1 */
                psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
                if (psma != NULL) {
                        un_register_gppsm_seg(psma);
                        /* Set alctr ptr to NULL for future reuse */
                        cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
                } else if (ul_seg_id != CMM_ALLSEGMENTS) {
                        status = -EPERM;
                }
                mutex_unlock(&cmm_mgr_obj->cmm_lock);
                if (ul_seg_id != CMM_ALLSEGMENTS)
                        break;

                ul_id++;
        }
        return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling the cmm mgr table entry.
 *  Note:
 *      This routine is always called with the cmm lock held.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
        struct cmm_mnode *curr, *tmp;

        /* free nodes on free list */
        list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
                list_del(&curr->link);
                kfree(curr);
        }

        /* free nodes on in-use list */
        list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
                list_del(&curr->link);
                kfree(curr);
        }

        if (psma->vm_base != 0)
                MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

        /* Free the allocator itself */
        kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      Return an available slot number, or a negative value on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
        s32 slot_seg;

        /* get first available slot in cmm mgr SMSegTab[] */
        for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
                if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
                        break;
        }
        if (slot_seg == CMM_MAXGPPSEGS)
                slot_seg = -1;  /* failed */

        return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
                                  u32 dw_va, u32 ul_size)
{
        struct cmm_mnode *pnode;

        /* Check cmm mgr's node freelist */
        if (list_empty(&cmm_mgr_obj->node_free_list)) {
                pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
                if (!pnode)
                        return NULL;
        } else {
                /* the list is non-empty, so this is a valid element */
                pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
                                         struct cmm_mnode, link);
                list_del_init(&pnode->link);
        }

        pnode->pa = dw_pa;
        pnode->va = dw_va;
        pnode->size = ul_size;

        return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap-thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
        list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
                                        u32 usize)
{
        struct cmm_mnode *node, *tmp;

        if (!allocator)
                return NULL;

        list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
                if (usize <= node->size) {
                        list_del(&node->link);
                        return node;
                }
        }

        return NULL;
}

/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the node with a physically adjacent free block if possible;
 *      otherwise insert it into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
                             struct cmm_mnode *node)
{
        struct cmm_mnode *curr;

        if (!node) {
                pr_err("%s: failed - node is NULL\n", __func__);
                return;
        }

        /* first pass: merge with a free block that ends where this node
         * starts, or starts where this node ends */
        list_for_each_entry(curr, &allocator->free_list, link) {
                if (NEXT_PA(curr) == node->pa) {
                        curr->size += node->size;
                        delete_node(allocator->cmm_mgr, node);
                        return;
                }
                if (curr->pa == NEXT_PA(node)) {
                        curr->pa = node->pa;
                        curr->va = node->va;
                        curr->size += node->size;
                        delete_node(allocator->cmm_mgr, node);
                        return;
                }
        }
        /* second pass: no neighbor found, insert in size order */
        list_for_each_entry(curr, &allocator->free_list, link) {
                if (curr->size >= node->size) {
                        list_add_tail(&node->link, &curr->link);
                        return;
                }
        }
        list_add_tail(&node->link, &allocator->free_list);
}
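
/*
 *  Worked example (illustrative numbers): with a free block at pa 0x1000,
 *  size 0x100, freeing a block at pa 0x1100, size 0x80 satisfies
 *  NEXT_PA(curr) == node->pa (0x1000 + 0x100 == 0x1100), so the blocks
 *  merge into one free block at 0x1000 of size 0x180, and the spare node
 *  is parked on the manager's node_free_list by delete_node().
 */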

/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds: 1, 2, 3, ..., max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
                                           u32 ul_seg_id)
{
        /* guard against out-of-range segment ids from callers */
        if (ul_seg_id == 0 || ul_seg_id > CMM_MAXGPPSEGS)
                return NULL;

        return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  The cmm_xlator_xxx() routines below are used by Node and Stream
 *  to perform SM address translation to the client process address space.
 *  A "translator" object is created by a node/stream for each SM seg used.
 */

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
                      struct cmm_object *hcmm_mgr,
                      struct cmm_xlatorattrs *xlator_attrs)
{
        struct cmm_xlator *xlator_object = NULL;
        int status = 0;

        *xlator = NULL;
        if (xlator_attrs == NULL)
                xlator_attrs = &cmm_dfltxlatorattrs;    /* set defaults */

        xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
        if (xlator_object != NULL) {
                xlator_object->cmm_mgr = hcmm_mgr;      /* ref back to CMM */
                /* SM seg_id */
                xlator_object->seg_id = xlator_attrs->seg_id;
        } else {
                status = -ENOMEM;
        }
        if (!status)
                *xlator = (struct cmm_xlatorobject *)xlator_object;

        return status;
}
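
/*
 *  Translator setup sketch (illustrative only; a node/stream would do this
 *  once per SM segment it uses, then feed the translator its client-process
 *  VA range via cmm_xlator_info() with set_info == true):
 *
 *      struct cmm_xlatorobject *xlator;
 *      u8 *va_base = client_va_base;   process VA of the mapped SM
 *
 *      if (!cmm_xlator_create(&xlator, hcmm_mgr, NULL))
 *              cmm_xlator_info(xlator, &va_base, va_size, 1, true);
 */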

/*
 *  ======== cmm_xlator_alloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer and store its translated client process
 *      virtual address through va_buf.
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
                           u32 pa_size)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        void *pbuf = NULL;
        void *tmp_va_buff;
        struct cmm_attrs attrs;

        if (xlator_obj) {
                attrs.seg_id = xlator_obj->seg_id;
                __raw_writel(0, va_buf);
                /* Alloc SM */
                pbuf = cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs,
                                      NULL);
                if (pbuf) {
                        /* convert to the translator's (node/strm) process
                         * virtual address */
                        tmp_va_buff = cmm_xlator_translate(xlator,
                                                           pbuf, CMM_PA2VA);
                        __raw_writel((u32)tmp_va_buff, va_buf);
                }
        }
        return pbuf;
}

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        int status = -EPERM;
        void *buf_pa = NULL;

        if (xlator_obj) {
                /* convert Va to Pa so we can free it. */
                buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
                if (buf_pa) {
                        status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
                                              xlator_obj->seg_id);
                        if (status) {
                                /* Uh oh, this shouldn't happen. Descriptor
                                 * gone! */
                                pr_err("%s, line %d: Assertion failed\n",
                                       __FILE__, __LINE__);
                        }
                }
        }
        return status;
}

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
                    u32 ul_size, u32 segm_id, bool set_info)
{
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        int status = 0;

        if (xlator_obj) {
                if (set_info) {
                        /* set the translator's virtual address range */
                        xlator_obj->virt_base = (u32) *paddr;
                        xlator_obj->virt_size = ul_size;
                } else {
                        /* return the virt base address */
                        *paddr = (u8 *) xlator_obj->virt_base;
                }
        } else {
                status = -EFAULT;
        }
        return status;
}

/*
 *  ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
                           enum cmm_xlatetype xtype)
{
        u32 dw_addr_xlate = 0;
        struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
        struct cmm_object *cmm_mgr_obj = NULL;
        struct cmm_allocator *allocator = NULL;
        u32 dw_offset = 0;

        if (!xlator_obj)
                goto loop_cont;

        cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
        /* get this translator's default SM allocator */
        allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
        if (!allocator)
                goto loop_cont;

        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
            (xtype == CMM_PA2VA)) {
                if (xtype == CMM_PA2VA) {
                        /* Gpp Va = Va Base + offset */
                        dw_offset = (u8 *) paddr -
                            (u8 *) (allocator->shm_base - allocator->dsp_size);
                        dw_addr_xlate = xlator_obj->virt_base + dw_offset;
                        /* Check if translated Va base is in range */
                        if ((dw_addr_xlate < xlator_obj->virt_base) ||
                            (dw_addr_xlate >=
                             (xlator_obj->virt_base +
                              xlator_obj->virt_size))) {
                                dw_addr_xlate = 0;      /* bad address */
                        }
                } else {
                        /* Gpp Pa = Gpp Base + offset */
                        dw_offset = (u8 *) paddr - (u8 *) xlator_obj->virt_base;
                        dw_addr_xlate =
                            allocator->shm_base - allocator->dsp_size +
                            dw_offset;
                }
        } else {
                dw_addr_xlate = (u32) paddr;
        }
        /* Now convert the address to the target physical address if needed */
        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
                /* Got Gpp Pa now, convert to DSP Pa */
                dw_addr_xlate =
                    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
                                dw_addr_xlate,
                                allocator->dsp_phys_addr_offset *
                                allocator->c_factor);
        } else if (xtype == CMM_DSPPA2PA) {
                /* Got DSP Pa, convert to GPP Pa */
                dw_addr_xlate =
                    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
                                dw_addr_xlate,
                                allocator->dsp_phys_addr_offset *
                                allocator->c_factor);
        }
loop_cont:
        return (void *)dw_addr_xlate;
}
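
/*
 *  Translation sketch (illustrative numbers): with shm_base 0x87000000,
 *  dsp_size 0 and virt_base 0x40000000, CMM_PA2VA maps physical address
 *  0x87000100 to 0x40000000 + (0x87000100 - 0x87000000) = 0x40000100, and
 *  CMM_VA2PA performs the inverse. The *DSPPA variants additionally apply
 *  dsp_phys_addr_offset * c_factor via GPPPA2DSPPA()/DSPPA2GPPPA().
 */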