linux/drivers/staging/tidspbridge/core/io_sm.c
   1/*
   2 * io_sm.c
   3 *
   4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
   5 *
   6 * IO dispatcher for a shared memory channel driver.
   7 *
   8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
   9 *
  10 * This package is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 *
  14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
  15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  17 */
  18
  19/*
  20 * Channel Invariant:
  21 * There is an important invariant condition which must be maintained per
  22 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
  23 * which may cause timeouts and/or failure of the sync_wait_on_event
 * function: a channel's sync_event must be signalled if and only if its
 * list of I/O completions is non-empty (see notify_chnl_complete()).
 */
  26#include <linux/types.h>
  27#include <linux/list.h>
  28
  29/* Host OS */
  30#include <dspbridge/host_os.h>
  31#include <linux/workqueue.h>
  32
  33/*  ----------------------------------- DSP/BIOS Bridge */
  34#include <dspbridge/dbdefs.h>
  35
  36/* Services Layer */
  37#include <dspbridge/ntfy.h>
  38#include <dspbridge/sync.h>
  39
  40/* Hardware Abstraction Layer */
  41#include <hw_defs.h>
  42#include <hw_mmu.h>
  43
  44/* Bridge Driver */
  45#include <dspbridge/dspdeh.h>
  46#include <dspbridge/dspio.h>
  47#include <dspbridge/dspioctl.h>
  48#include <dspbridge/wdt.h>
  49#include <_tiomap.h>
  50#include <tiomap_io.h>
  51#include <_tiomap_pwr.h>
  52
  53/* Platform Manager */
  54#include <dspbridge/cod.h>
  55#include <dspbridge/node.h>
  56#include <dspbridge/dev.h>
  57
  58/* Others */
  59#include <dspbridge/rms_sh.h>
  60#include <dspbridge/mgr.h>
  61#include <dspbridge/drv.h>
  62#include "_cmm.h"
  63#include "module_list.h"
  64
  65/* This */
  66#include <dspbridge/io_sm.h>
  67#include "_msg_sm.h"
  68
  69/* Defines, Data Structures, Typedefs */
  70#define OUTPUTNOTREADY  0xffff
  71#define NOTENABLED      0xffff  /* Channel(s) not enabled */
  72
  73#define EXTEND      "_EXT_END"
  74
  75#define SWAP_WORD(x)     (x)
  76#define UL_PAGE_ALIGN_SIZE 0x10000      /* Page Align Size */
  77
  78#define MAX_PM_REQS 32
  79
  80#define MMU_FAULT_HEAD1 0xa5a5a5a5
  81#define MMU_FAULT_HEAD2 0x96969696
  82#define POLL_MAX 1000
  83#define MAX_MMU_DBGBUFF 10240
  84
  85/* IO Manager: only one created per board */
  86struct io_mgr {
        /* These three fields must be the first fields in an io_mgr struct */
  88        /* Bridge device context */
  89        struct bridge_dev_context *bridge_context;
  90        /* Function interface to Bridge driver */
  91        struct bridge_drv_interface *intf_fxns;
  92        struct dev_object *dev_obj;     /* Device this board represents */
  93
  94        /* These fields initialized in bridge_io_create() */
  95        struct chnl_mgr *chnl_mgr;
  96        struct shm *shared_mem; /* Shared Memory control */
  97        u8 *input;              /* Address of input channel */
  98        u8 *output;             /* Address of output channel */
  99        struct msg_mgr *msg_mgr;        /* Message manager */
        /* Msg control for messages from the DSP */
 101        struct msg_ctrl *msg_input_ctrl;
        /* Msg control for messages to the DSP */
 103        struct msg_ctrl *msg_output_ctrl;
 104        u8 *msg_input;          /* Address of input messages */
 105        u8 *msg_output;         /* Address of output messages */
 106        u32 sm_buf_size;        /* Size of a shared memory I/O channel */
 107        bool shared_irq;        /* Is this IRQ shared? */
 108        u32 word_size;          /* Size in bytes of DSP word */
 109        u16 intr_val;           /* Interrupt value */
        /* Private extended proc info; MMU setup */
 111        struct mgr_processorextinfo ext_proc_info;
 112        struct cmm_object *cmm_mgr;     /* Shared Mem Mngr */
 113        struct work_struct io_workq;    /* workqueue */
 114#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
 115        u32 trace_buffer_begin; /* Trace message start address */
 116        u32 trace_buffer_end;   /* Trace message end address */
 117        u32 trace_buffer_current;       /* Trace message current address */
 118        u32 gpp_read_pointer;           /* GPP Read pointer to Trace buffer */
 119        u8 *msg;
 120        u32 gpp_va;
 121        u32 dsp_va;
 122#endif
 123        /* IO Dpc */
        u32 dpc_req;            /* Number of requested DPCs */
        u32 dpc_sched;          /* Number of executed DPCs */
 126        struct tasklet_struct dpc_tasklet;
 127        spinlock_t dpc_lock;
 128
 129};
 130
 131struct shm_symbol_val {
 132        u32 shm_base;
 133        u32 shm_lim;
 134        u32 msg_base;
 135        u32 msg_lim;
 136        u32 shm0_end;
 137        u32 dyn_ext;
 138        u32 ext_end;
 139};
 140
 141/* Function Prototypes */
 142static void io_dispatch_pm(struct io_mgr *pio_mgr);
 143static void notify_chnl_complete(struct chnl_object *pchnl,
 144                                 struct chnl_irp *chnl_packet_obj);
 145static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 146                        u8 io_mode);
 147static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 148                        u8 io_mode);
 149static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
 150static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
 151static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
 152                             struct chnl_object *pchnl, u32 mask);
 153
 154/* Bus Addr (cached kernel) */
 155static int register_shm_segs(struct io_mgr *hio_mgr,
 156                                    struct cod_manager *cod_man,
 157                                    u32 dw_gpp_base_pa);
 158
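/*
 * Helpers for the shm host_free_mask: a set bit tells the DSP that the host
 * has a buffer queued for input on that channel (set_chnl_busy()); a cleared
 * bit tells the DSP that no more host buffers are available on that channel
 * (set_chnl_free()).
 */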
 159static inline void set_chnl_free(struct shm *sm, u32 chnl)
 160{
 161        sm->host_free_mask &= ~(1 << chnl);
 162}
 163
 164static inline void set_chnl_busy(struct shm *sm, u32 chnl)
 165{
 166        sm->host_free_mask |= 1 << chnl;
 167}
 168
 169
 170/*
 171 *  ======== bridge_io_create ========
 172 *      Create an IO manager object.
 173 */
 174int bridge_io_create(struct io_mgr **io_man,
 175                            struct dev_object *hdev_obj,
 176                            const struct io_attrs *mgr_attrts)
 177{
 178        struct io_mgr *pio_mgr = NULL;
 179        struct bridge_dev_context *hbridge_context = NULL;
 180        struct cfg_devnode *dev_node_obj;
 181        struct chnl_mgr *hchnl_mgr;
 182        u8 dev_type;
 183
 184        /* Check requirements */
 185        if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
 186                return -EFAULT;
 187
 188        *io_man = NULL;
 189
 190        dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
 191        if (!hchnl_mgr || hchnl_mgr->iomgr)
 192                return -EFAULT;
 193
 194        /*
 195         * Message manager will be created when a file is loaded, since
 196         * size of message buffer in shared memory is configurable in
 197         * the base image.
 198         */
 199        dev_get_bridge_context(hdev_obj, &hbridge_context);
 200        if (!hbridge_context)
 201                return -EFAULT;
 202
 203        dev_get_dev_type(hdev_obj, &dev_type);
 204
 205        /* Allocate IO manager object */
 206        pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
 207        if (!pio_mgr)
 208                return -ENOMEM;
 209
        /* Initialize IO manager fields */
 211        pio_mgr->chnl_mgr = hchnl_mgr;
 212        pio_mgr->word_size = mgr_attrts->word_size;
 213
 214        if (dev_type == DSP_UNIT) {
 215                /* Create an IO DPC */
 216                tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
 217
 218                /* Initialize DPC counters */
 219                pio_mgr->dpc_req = 0;
 220                pio_mgr->dpc_sched = 0;
 221
 222                spin_lock_init(&pio_mgr->dpc_lock);
 223
 224                if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
 225                        bridge_io_destroy(pio_mgr);
 226                        return -EIO;
 227                }
 228        }
 229
 230        pio_mgr->bridge_context = hbridge_context;
 231        pio_mgr->shared_irq = mgr_attrts->irq_shared;
 232        if (dsp_wdt_init()) {
 233                bridge_io_destroy(pio_mgr);
 234                return -EPERM;
 235        }
 236
 237        /* Return IO manager object to caller... */
 238        hchnl_mgr->iomgr = pio_mgr;
 239        *io_man = pio_mgr;
 240
 241        return 0;
 242}
 243
 244/*
 245 *  ======== bridge_io_destroy ========
 246 *  Purpose:
 247 *      Disable interrupts, destroy the IO manager.
 248 */
 249int bridge_io_destroy(struct io_mgr *hio_mgr)
 250{
 251        int status = 0;
 252        if (hio_mgr) {
 253                /* Free IO DPC object */
 254                tasklet_kill(&hio_mgr->dpc_tasklet);
 255
 256#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
 257                kfree(hio_mgr->msg);
 258#endif
 259                dsp_wdt_exit();
 260                /* Free this IO manager object */
 261                kfree(hio_mgr);
 262        } else {
 263                status = -EFAULT;
 264        }
 265
 266        return status;
 267}
 268
static struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr)
 270{
 271        struct shm_symbol_val *s;
 272        struct cod_manager *cod_man;
 273        int status;
 274
 275        s = kzalloc(sizeof(*s), GFP_KERNEL);
 276        if (!s)
 277                return ERR_PTR(-ENOMEM);
 278
 279        status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
 280        if (status)
 281                goto free_symbol;
 282
 283        /* Get start and length of channel part of shared memory */
 284        status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
 285                                   &s->shm_base);
 286        if (status)
 287                goto free_symbol;
 288
 289        status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
 290                                   &s->shm_lim);
 291        if (status)
 292                goto free_symbol;
 293
 294        if (s->shm_lim <= s->shm_base) {
 295                status = -EINVAL;
 296                goto free_symbol;
 297        }
 298
 299        /* Get start and length of message part of shared memory */
 300        status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
 301                                   &s->msg_base);
 302        if (status)
 303                goto free_symbol;
 304
 305        status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
 306                                   &s->msg_lim);
 307        if (status)
 308                goto free_symbol;
 309
 310        if (s->msg_lim <= s->msg_base) {
 311                status = -EINVAL;
 312                goto free_symbol;
 313        }
 314
 315#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 316        status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end);
 317#else
 318        status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end);
 319#endif
 320        if (status)
 321                goto free_symbol;
 322
 323        status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext);
 324        if (status)
 325                goto free_symbol;
 326
 327        status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end);
 328        if (status)
 329                goto free_symbol;
 330
 331        return s;
 332
 333free_symbol:
 334        kfree(s);
 335        return ERR_PTR(status);
 336}
 337
 338/*
 339 *  ======== bridge_io_on_loaded ========
 340 *  Purpose:
 *      Called when a new program is loaded to get shared memory buffer
 *      parameters from the COFF file. The shared buffer base and limit
 *      symbol values are in DSP address units.
 344 */
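/*
 * Outline of the steps performed below:
 *   1. Read the channel/message shared memory symbols from the loaded image.
 *   2. Compute the channel, message and padding sizes for the SM region.
 *   3. Map the dynamic-load and SM segments into the DSP address space,
 *      using locked TLB entries where possible and page tables otherwise.
 *   4. Copy the remaining 1 MB entries from the CDB and map the L4
 *      peripherals.
 *   5. Hand the MMU configuration to the Bridge driver
 *      (BRDIOCTL_SETMMUCONFIG) and set up the host-side channel and message
 *      buffer pointers.
 */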
 345int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 346{
 347        struct bridge_dev_context *dc = hio_mgr->bridge_context;
 348        struct cfg_hostres *cfg_res = dc->resources;
 349        struct bridge_ioctl_extproc *eproc;
 350        struct cod_manager *cod_man;
 351        struct chnl_mgr *hchnl_mgr;
 352        struct msg_mgr *hmsg_mgr;
 353        struct shm_symbol_val *s;
 354        int status;
 355        u8 num_procs;
 356        s32 ndx;
 357        u32 i;
 358        u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs;
 359        u32 seg0_sz, seg1_sz;
 360        u32 pa, va, da;
 361        u32 pa_curr, va_curr, da_curr;
 362        u32 bytes;
 363        u32 all_bits = 0;
 364        u32 page_size[] = {
 365                HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
 366                HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
 367        };
 368        u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR |
 369                        DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK;
 370
 371        status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
 372        if (status)
 373                return status;
 374
 375        hchnl_mgr = hio_mgr->chnl_mgr;
 376
 377        /* The message manager is destroyed when the board is stopped */
 378        dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
 379        hmsg_mgr = hio_mgr->msg_mgr;
 380        if (!hchnl_mgr || !hmsg_mgr)
 381                return -EFAULT;
 382
        hio_mgr->shared_mem = NULL;
 385
 386        s = _get_shm_symbol_values(hio_mgr);
 387        if (IS_ERR(s))
 388                return PTR_ERR(s);
 389
 390        /* Get total length in bytes */
 391        shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size;
 392
 393        /* Calculate size of a PROCCOPY shared memory region */
 394        dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
 395                __func__, shm_sz - sizeof(struct shm));
 396
 397        /* Length (bytes) of messaging part of shared memory */
 398        msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size;
 399
 400        /* Total length (bytes) of shared memory: chnl + msg */
 401        mem_sz = shm_sz + msg_sz;
 402
 403        /* Get memory reserved in host resources */
 404        (void)mgr_enum_processor_info(0,
 405                                      (struct dsp_processorinfo *)
 406                                                &hio_mgr->ext_proc_info,
 407                                      sizeof(struct mgr_processorextinfo),
 408                                      &num_procs);
 409
 410        /* IO supports only one DSP for now */
 411        if (num_procs != 1) {
 412                status = -EINVAL;
 413                goto free_symbol;
 414        }
 415
        /* The first MMU TLB entry (TLB_0) in the DCD is the shm base */
        pa = cfg_res->mem_phys[1];
        /* This is the virtual uncached ioremapped address of that region */
        va = cfg_res->mem_base[1];

        /* Why can't we directly take the DSPVA from the symbols? */
        da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
 423        seg0_sz = (s->shm0_end - da) * hio_mgr->word_size;
 424        seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size;
 425
 426        /* 4K align */
 427        seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL);
 428
 429        /* 64K align */
 430        seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL);
 431
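        /*
         * Pad seg1 so that seg0 starts on a 64 KB (UL_PAGE_ALIGN_SIZE)
         * boundary; e.g. if pa + seg1_sz ends at 0x87314000, then
         * pad_sz = 0x10000 - 0x4000 = 0xc000.
         */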
 432        pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE);
 433        if (pad_sz == UL_PAGE_ALIGN_SIZE)
 434                pad_sz = 0x0;
 435
 436        dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da);
 437        dev_dbg(bridge,
 438                "shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n",
 439                s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz);
 440
 441        if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) {
 442                pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
 443                       __func__, cfg_res->mem_length[1],
 444                       seg0_sz + seg1_sz + pad_sz);
 445                status = -ENOMEM;
 446                goto free_symbol;
 447        }
 448
 449        pa_curr = pa;
 450        va_curr = s->dyn_ext * hio_mgr->word_size;
 451        da_curr = va;
 452        bytes = seg1_sz;
 453
 454        /*
 455         * Try to fit into TLB entries. If not possible, push them to page
 456         * tables. It is quite possible that if sections are not on
 457         * bigger page boundary, we may end up making several small pages.
 458         * So, push them onto page tables, if that is the case.
 459         */
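        /*
         * On each pass the largest page size whose alignment mask is clear
         * in (pa_curr | va_curr) is used; e.g. with pa_curr == 0x87210000
         * and va_curr == 0x20010000, all_bits == 0xa7210000, so the 16 MB
         * and 1 MB masks are not clear and a 64 KB mapping is chosen
         * (provided bytes >= 64 KB).
         */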
 460        while (bytes) {
 461                /*
 462                 * To find the max. page size with which both PA & VA are
 463                 * aligned.
 464                 */
 465                all_bits = pa_curr | va_curr;
 466                dev_dbg(bridge,
 467                        "seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
 468                        all_bits, pa_curr, va_curr, bytes);
 469
 470                for (i = 0; i < 4; i++) {
 471                        if ((bytes >= page_size[i]) &&
 472                            ((all_bits & (page_size[i] - 1)) == 0)) {
 473                                status = hio_mgr->intf_fxns->brd_mem_map(dc,
 474                                                        pa_curr, va_curr,
 475                                                        page_size[i], map_attrs,
 476                                                        NULL);
 477                                if (status)
 478                                        goto free_symbol;
 479
 480                                pa_curr += page_size[i];
 481                                va_curr += page_size[i];
 482                                da_curr += page_size[i];
 483                                bytes -= page_size[i];
 484                                /*
 485                                 * Don't try smaller sizes. Hopefully we have
 486                                 * reached an address aligned to a bigger page
 487                                 * size.
 488                                 */
 489                                break;
 490                        }
 491                }
 492        }
 493
 494        pa_curr += pad_sz;
 495        va_curr += pad_sz;
 496        da_curr += pad_sz;
 497        bytes = seg0_sz;
 498        va_curr = da * hio_mgr->word_size;
 499
        eproc = kcalloc(BRDIOCTL_NUMOFMMUTLB, sizeof(*eproc), GFP_KERNEL);
 501        if (!eproc) {
 502                status = -ENOMEM;
 503                goto free_symbol;
 504        }
 505
 506        ndx = 0;
 507        /* Configure the TLB entries for the next cacheable segment */
 508        while (bytes) {
 509                /*
 510                 * To find the max. page size with which both PA & VA are
 511                 * aligned.
 512                 */
 513                all_bits = pa_curr | va_curr;
 514                dev_dbg(bridge,
 515                        "seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
 516                        all_bits, pa_curr, va_curr, bytes);
 517
 518                for (i = 0; i < 4; i++) {
                        if (bytes < page_size[i] ||
                            (all_bits & (page_size[i] - 1)))
                                continue;
 522
 523                        if (ndx >= MAX_LOCK_TLB_ENTRIES) {
 524                                status = hio_mgr->intf_fxns->brd_mem_map(dc,
 525                                                        pa_curr, va_curr,
 526                                                        page_size[i], map_attrs,
 527                                                        NULL);
                                dev_dbg(bridge,
                                        "PTE pa %x va %x dsp_va %x sz %x\n",
                                        pa_curr, da_curr, va_curr,
                                        page_size[i]);
 534                                if (status)
 535                                        goto free_eproc;
 536                        }
 537
 538                        /* This is the physical address written to DSP MMU */
 539                        eproc[ndx].gpp_pa = pa_curr;
 540
 541                        /*
 542                         * This is the virtual uncached ioremapped
 543                         * address!!!
 544                         */
 545                        eproc[ndx].gpp_va = da_curr;
 546                        eproc[ndx].dsp_va = va_curr / hio_mgr->word_size;
 547                        eproc[ndx].size = page_size[i];
 548                        eproc[ndx].endianism = HW_LITTLE_ENDIAN;
 549                        eproc[ndx].elem_size = HW_ELEM_SIZE16BIT;
 550                        eproc[ndx].mixed_mode = HW_MMU_CPUES;
 551                        dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n",
 552                                __func__, eproc[ndx].gpp_pa,
 553                                eproc[ndx].gpp_va,
 554                                eproc[ndx].dsp_va * hio_mgr->word_size,
 555                                page_size[i]);
 556                        ndx++;
 557
 558                        pa_curr += page_size[i];
 559                        va_curr += page_size[i];
 560                        da_curr += page_size[i];
 561                        bytes -= page_size[i];
 562                        /*
 563                         * Don't try smaller sizes. Hopefully we have reached
 564                         * an address aligned to a bigger page size.
 565                         */
 566                        break;
 567                }
 568        }
 569
 570        /*
 571         * Copy remaining entries from CDB. All entries are 1 MB and
 572         * should not conflict with shm entries on MPU or DSP side.
 573         */
 574        for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
 575                struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info;
 576                u32 word_sz = hio_mgr->word_size;
 577
 578                if (ep->ty_tlb[i].gpp_phys == 0)
 579                        continue;
 580
 581                if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 &&
 582                     ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) ||
 583                    (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz &&
 584                     ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) {
 585                        dev_dbg(bridge,
 586                                "err cdb%d pa %x da %x shm pa %x da %x sz %x\n",
 587                                i, ep->ty_tlb[i].gpp_phys,
 588                                ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz);
 589                        status = -EPERM;
 590                        goto free_eproc;
 591                }
 592
 593                if (ndx >= MAX_LOCK_TLB_ENTRIES) {
 594                        status = hio_mgr->intf_fxns->brd_mem_map(dc,
 595                                                ep->ty_tlb[i].gpp_phys,
 596                                                ep->ty_tlb[i].dsp_virt,
 597                                                0x100000, map_attrs, NULL);
 598                        if (status)
 599                                goto free_eproc;
 600                }
 601
 602                eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt;
 603                eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys;
 604                eproc[ndx].gpp_va = 0;
 605
 606                /* 1 MB */
 607                eproc[ndx].size = 0x100000;
 608                dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n",
 609                        eproc[ndx].gpp_pa, eproc[ndx].dsp_va);
 610                ndx++;
 611        }
 612
 613        /* Map the L4 peripherals */
 614        i = 0;
 615        while (l4_peripheral_table[i].phys_addr) {
 616                status = hio_mgr->intf_fxns->brd_mem_map(dc,
 617                                        l4_peripheral_table[i].phys_addr,
 618                                        l4_peripheral_table[i].dsp_virt_addr,
 619                                        HW_PAGE_SIZE4KB, map_attrs, NULL);
 620                if (status)
 621                        goto free_eproc;
 622                i++;
 623        }
 624
 625        for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 626                eproc[i].dsp_va = 0;
 627                eproc[i].gpp_pa = 0;
 628                eproc[i].gpp_va = 0;
 629                eproc[i].size = 0;
 630        }
 631
 632        /*
 633         * Set the shm physical address entry (grayed out in CDB file)
 634         * to the virtual uncached ioremapped address of shm reserved
 635         * on MPU.
 636         */
 637        hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
 638                (va + seg1_sz + pad_sz);
 639
 640        /*
 641         * Need shm Phys addr. IO supports only one DSP for now:
 642         * num_procs = 1.
 643         */
        if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) {
                status = -EFAULT;
                goto free_eproc;
        }

        if (eproc[0].dsp_va > s->shm_base) {
                status = -EPERM;
                goto free_eproc;
        }
 649
        /* shm_base may not coincide with eproc[0].dsp_va */
 651        shm_base_offs = (s->shm_base - eproc[0].dsp_va) *
 652                    hio_mgr->word_size;
 653        /*
 654         * bridge_dev_ctrl() will set dev context dsp-mmu info. In
         * bridge_brd_start() the MMU will be re-programmed with MMU
 656         * DSPVa-GPPPa pair info while DSP is in a known
 657         * (reset) state.
 658         */
 659        status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
 660                                               BRDIOCTL_SETMMUCONFIG, eproc);
 661        if (status)
 662                goto free_eproc;
 663
 664        s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
 665        s->shm_base += shm_base_offs;
 666        s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base,
 667                                               mem_sz);
 668        if (!s->shm_base) {
 669                status = -EFAULT;
 670                goto free_eproc;
 671        }
 672
        /* Register SM */
        status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa);
        if (status)
                goto free_eproc;

 676        hio_mgr->shared_mem = (struct shm *)s->shm_base;
 677        hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
 678        hio_mgr->output = hio_mgr->input + (shm_sz -
 679                                            sizeof(struct shm)) / 2;
 680        hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
 681
 682        /*  Set up Shared memory addresses for messaging */
 683        hio_mgr->msg_input_ctrl =
 684                (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz);
 685        hio_mgr->msg_input =
 686                (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
 687        hio_mgr->msg_output_ctrl =
 688                (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
 689                                           msg_sz / 2);
 690        hio_mgr->msg_output =
 691                (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
 692        hmsg_mgr->max_msgs =
 693                ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) /
 694                sizeof(struct msg_dspmsg);
 695
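        /*
         * Resulting layout of the shared memory region:
         *   [struct shm][input buf][output buf][msg_ctrl][input msgs]
         *   [msg_ctrl][output msgs]
         * The input and output buffers each take (shm_sz - sizeof(struct
         * shm)) / 2 bytes and each message area takes msg_sz / 2 bytes.
         */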
 696        dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
 697                "output %p, msg_input_ctrl %p, msg_input %p, "
 698                "msg_output_ctrl %p, msg_output %p\n",
 699                (u8 *) hio_mgr->shared_mem, hio_mgr->input,
 700                hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
 701                hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
 702                hio_mgr->msg_output);
        dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
 704                hmsg_mgr->max_msgs);
 705        memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
 706
 707#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
 708        /* Get the start address of trace buffer */
 709        status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
 710                                   &hio_mgr->trace_buffer_begin);
 711        if (status)
 712                goto free_eproc;
 713
 714        hio_mgr->gpp_read_pointer =
 715                hio_mgr->trace_buffer_begin =
 716                        (va + seg1_sz + pad_sz) +
 717                        (hio_mgr->trace_buffer_begin - da);
 718
 719        /* Get the end address of trace buffer */
 720        status = cod_get_sym_value(cod_man, SYS_PUTCEND,
 721                                   &hio_mgr->trace_buffer_end);
 722        if (status)
 723                goto free_eproc;
 724
 725        hio_mgr->trace_buffer_end =
 726                (va + seg1_sz + pad_sz) +
 727                (hio_mgr->trace_buffer_end - da);
 728
 729        /* Get the current address of DSP write pointer */
 730        status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
 731                                   &hio_mgr->trace_buffer_current);
 732        if (status)
 733                goto free_eproc;
 734
 735        hio_mgr->trace_buffer_current =
 736                (va + seg1_sz + pad_sz) +
 737                (hio_mgr->trace_buffer_current - da);
 738
 739        /* Calculate the size of trace buffer */
 740        kfree(hio_mgr->msg);
 741        hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
 742                                hio_mgr->trace_buffer_begin) *
 743                                hio_mgr->word_size) + 2, GFP_KERNEL);
 744        if (!hio_mgr->msg) {
 745                status = -ENOMEM;
 746                goto free_eproc;
 747        }
 748
 749        hio_mgr->dsp_va = da;
 750        hio_mgr->gpp_va = (va + seg1_sz + pad_sz);
 751#endif
 752
 753free_eproc:
 754        kfree(eproc);
 755free_symbol:
 756        kfree(s);
 757
 758        return status;
 759}
 760
 761/*
 762 *  ======== io_buf_size ========
 763 *      Size of shared memory I/O channel.
 764 */
 765u32 io_buf_size(struct io_mgr *hio_mgr)
 766{
 767        if (hio_mgr)
 768                return hio_mgr->sm_buf_size;
 769        else
 770                return 0;
 771}
 772
 773/*
 774 *  ======== io_cancel_chnl ========
 775 *      Cancel IO on a given PCPY channel.
 776 */
void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
{
        struct shm *sm;

        if (!hio_mgr)
                goto func_end;
        sm = hio_mgr->shared_mem;

        /* Inform DSP that we have no more buffers on this channel */
        set_chnl_free(sm, chnl);

        sm_interrupt_dsp(hio_mgr->bridge_context, MBX_PCPY_CLASS);
func_end:
        return;
}
 793
 794
 795/*
 796 *  ======== io_dispatch_pm ========
 797 *      Performs I/O dispatch on PM related messages from DSP
 798 */
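/*
 * Three cases are handled: MBX_PM_HIBERNATE_EN (hibernate request),
 * MBX_PM_OPP_REQ (an OPP constraint request, with the requested OPP point
 * read from shared memory) and, for any other value, a DSP clock control
 * command.
 */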
 799static void io_dispatch_pm(struct io_mgr *pio_mgr)
 800{
 801        int status;
 802        u32 parg[2];
 803
 804        /* Perform Power message processing here */
 805        parg[0] = pio_mgr->intr_val;
 806
 807        /* Send the command to the Bridge clk/pwr manager to handle */
 808        if (parg[0] == MBX_PM_HIBERNATE_EN) {
 809                dev_dbg(bridge, "PM: Hibernate command\n");
 810                status = pio_mgr->intf_fxns->
 811                                dev_cntrl(pio_mgr->bridge_context,
 812                                              BRDIOCTL_PWR_HIBERNATE, parg);
 813                if (status)
 814                        pr_err("%s: hibernate cmd failed 0x%x\n",
 815                                       __func__, status);
 816        } else if (parg[0] == MBX_PM_OPP_REQ) {
 817                parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
 818                dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
 819                status = pio_mgr->intf_fxns->
 820                                dev_cntrl(pio_mgr->bridge_context,
 821                                        BRDIOCTL_CONSTRAINT_REQUEST, parg);
 822                if (status)
 823                        dev_dbg(bridge, "PM: Failed to set constraint "
 824                                "= 0x%x\n", parg[1]);
 825        } else {
 826                dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
 827                        parg[0]);
 828                status = pio_mgr->intf_fxns->
 829                                dev_cntrl(pio_mgr->bridge_context,
 830                                              BRDIOCTL_CLK_CTRL, parg);
 831                if (status)
                        dev_dbg(bridge,
                                "PM: Failed to ctrl the DSP clk = 0x%x\n",
                                *parg);
 834        }
 835}
 836
 837/*
 838 *  ======== io_dpc ========
 839 *      Deferred procedure call for shared memory channel driver ISR.  Carries
 840 *      out the dispatch of I/O as a non-preemptible event. It can only be
 841 *      pre-empted by an ISR.
 842 */
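/*
 * io_mbox_msg() and iosm_schedule() increment dpc_req under dpc_lock each
 * time the tasklet is scheduled; io_dpc() services channel and message I/O
 * until it has caught up with every request and then records the count in
 * dpc_sched.
 */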
 843void io_dpc(unsigned long ref_data)
 844{
 845        struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
 846        struct chnl_mgr *chnl_mgr_obj;
 847        struct msg_mgr *msg_mgr_obj;
 848        struct deh_mgr *hdeh_mgr;
 849        u32 requested;
 850        u32 serviced;
 851
 852        if (!pio_mgr)
 853                goto func_end;
 854        chnl_mgr_obj = pio_mgr->chnl_mgr;
 855        dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
 856        dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
 857        if (!chnl_mgr_obj)
 858                goto func_end;
 859
 860        requested = pio_mgr->dpc_req;
 861        serviced = pio_mgr->dpc_sched;
 862
 863        if (serviced == requested)
 864                goto func_end;
 865
 866        /* Process pending DPC's */
 867        do {
 868                /* Check value of interrupt reg to ensure it's a valid error */
 869                if ((pio_mgr->intr_val > DEH_BASE) &&
 870                    (pio_mgr->intr_val < DEH_LIMIT)) {
 871                        /* Notify DSP/BIOS exception */
 872                        if (hdeh_mgr) {
 873#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 874                                print_dsp_debug_trace(pio_mgr);
 875#endif
 876                                bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
 877                                                  pio_mgr->intr_val);
 878                        }
 879                }
 880                /* Proc-copy channel dispatch */
 881                input_chnl(pio_mgr, NULL, IO_SERVICE);
 882                output_chnl(pio_mgr, NULL, IO_SERVICE);
 883
 884#ifdef CHNL_MESSAGES
 885                if (msg_mgr_obj) {
 886                        /* Perform I/O dispatch on message queues */
 887                        input_msg(pio_mgr, msg_mgr_obj);
 888                        output_msg(pio_mgr, msg_mgr_obj);
 889                }
 890
 891#endif
 892#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
 893                if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
 894                        /* Notify DSP Trace message */
 895                        print_dsp_debug_trace(pio_mgr);
 896                }
 897#endif
 898                serviced++;
 899        } while (serviced != requested);
 900        pio_mgr->dpc_sched = requested;
 901func_end:
 902        return;
 903}
 904
 905/*
 906 *  ======== io_mbox_msg ========
 *      Mailbox notifier callback for the shared memory IO manager. Decodes
 *      the mailbox message, dispatches PM messages inline, and schedules
 *      the IO DPC to handle channel and message I/O for everything else.
 910 */
 911int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
 912{
 913        struct io_mgr *pio_mgr;
 914        struct dev_object *dev_obj;
 915        unsigned long flags;
 916
 917        dev_obj = dev_get_first();
 918        dev_get_io_mgr(dev_obj, &pio_mgr);
 919
 920        if (!pio_mgr)
 921                return NOTIFY_BAD;
 922
 923        pio_mgr->intr_val = (u16)((u32)msg);
 924        if (pio_mgr->intr_val & MBX_PM_CLASS)
 925                io_dispatch_pm(pio_mgr);
 926
 927        if (pio_mgr->intr_val == MBX_DEH_RESET) {
 928                pio_mgr->intr_val = 0;
 929        } else {
 930                spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
 931                pio_mgr->dpc_req++;
 932                spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
 933                tasklet_schedule(&pio_mgr->dpc_tasklet);
 934        }
 935        return NOTIFY_OK;
 936}
 937
 938/*
 939 *  ======== io_request_chnl ========
 940 *  Purpose:
 941 *      Request channel I/O from the DSP. Sets flags in shared memory, then
 942 *      interrupts the DSP.
 943 */
 944void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
 945                        u8 io_mode, u16 *mbx_val)
 946{
 947        struct chnl_mgr *chnl_mgr_obj;
 948        struct shm *sm;
 949
 950        if (!pchnl || !mbx_val)
 951                goto func_end;
 952        chnl_mgr_obj = io_manager->chnl_mgr;
 953        sm = io_manager->shared_mem;
 954        if (io_mode == IO_INPUT) {
 955                /* Indicate to the DSP we have a buffer available for input */
 956                set_chnl_busy(sm, pchnl->chnl_id);
 957                *mbx_val = MBX_PCPY_CLASS;
 958        } else if (io_mode == IO_OUTPUT) {
 959                /*
 960                 * Record the fact that we have a buffer available for
 961                 * output.
 962                 */
 963                chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
        }
 966func_end:
 967        return;
 968}
 969
 970/*
 971 *  ======== iosm_schedule ========
 972 *      Schedule DPC for IO.
 973 */
 974void iosm_schedule(struct io_mgr *io_manager)
 975{
 976        unsigned long flags;
 977
 978        if (!io_manager)
 979                return;
 980
 981        /* Increment count of DPC's pending. */
 982        spin_lock_irqsave(&io_manager->dpc_lock, flags);
 983        io_manager->dpc_req++;
 984        spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
 985
 986        /* Schedule DPC */
 987        tasklet_schedule(&io_manager->dpc_tasklet);
 988}
 989
 990/*
 991 *  ======== find_ready_output ========
 992 *      Search for a host output channel which is ready to send.  If this is
 993 *      called as a result of servicing the DPC, then implement a round
 994 *      robin search; otherwise, this was called by a client thread (via
 995 *      IO_Dispatch()), so just start searching from the current channel id.
 996 */
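/*
 * For example, with pchnl == NULL, last_output == 5 and only bit 0 set in
 * mask, the scan starts at channel 6, wraps around at CHNL_MAXCHANNELS and
 * returns channel 0, updating last_output to 0.
 */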
 997static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
 998                             struct chnl_object *pchnl, u32 mask)
 999{
1000        u32 ret = OUTPUTNOTREADY;
1001        u32 id, start_id;
1002        u32 shift;
1003
        id = pchnl ? pchnl->chnl_id : chnl_mgr_obj->last_output + 1;
        id = (id == CHNL_MAXCHANNELS) ? 0 : id;
1007        if (id >= CHNL_MAXCHANNELS)
1008                goto func_end;
1009        if (mask) {
1010                shift = (1 << id);
1011                start_id = id;
1012                do {
1013                        if (mask & shift) {
1014                                ret = id;
1015                                if (pchnl == NULL)
1016                                        chnl_mgr_obj->last_output = id;
1017                                break;
1018                        }
1019                        id = id + 1;
1020                        id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1021                        shift = (1 << id);
1022                } while (id != start_id);
1023        }
1024func_end:
1025        return ret;
1026}
1027
1028/*
1029 *  ======== input_chnl ========
1030 *      Dispatch a buffer on an input channel.
1031 */
1032static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1033                        u8 io_mode)
1034{
1035        struct chnl_mgr *chnl_mgr_obj;
1036        struct shm *sm;
1037        u32 chnl_id;
1038        u32 bytes;
1039        struct chnl_irp *chnl_packet_obj = NULL;
1040        u32 dw_arg;
1041        bool clear_chnl = false;
1042        bool notify_client = false;
1043
1044        sm = pio_mgr->shared_mem;
1045        chnl_mgr_obj = pio_mgr->chnl_mgr;
1046
1047        /* Attempt to perform input */
1048        if (!sm->input_full)
1049                goto func_end;
1050
1051        bytes = sm->input_size * chnl_mgr_obj->word_size;
1052        chnl_id = sm->input_id;
1053        dw_arg = sm->arg;
1054        if (chnl_id >= CHNL_MAXCHANNELS) {
1055                /* Shouldn't be here: would indicate corrupted shm. */
1056                goto func_end;
1057        }
1058        pchnl = chnl_mgr_obj->channels[chnl_id];
1059        if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1060                if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1061                        /* Get the I/O request, and attempt a transfer */
1062                        if (!list_empty(&pchnl->io_requests)) {
1063                                if (!pchnl->cio_reqs)
1064                                        goto func_end;
1065
1066                                chnl_packet_obj = list_first_entry(
1067                                                &pchnl->io_requests,
1068                                                struct chnl_irp, link);
1069                                list_del(&chnl_packet_obj->link);
1070                                pchnl->cio_reqs--;
1071
1072                                /*
1073                                 * Ensure we don't overflow the client's
1074                                 * buffer.
1075                                 */
1076                                bytes = min(bytes, chnl_packet_obj->byte_size);
1077                                memcpy(chnl_packet_obj->host_sys_buf,
1078                                                pio_mgr->input, bytes);
1079                                pchnl->bytes_moved += bytes;
1080                                chnl_packet_obj->byte_size = bytes;
1081                                chnl_packet_obj->arg = dw_arg;
1082                                chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1083
1084                                if (bytes == 0) {
1085                                        /*
1086                                         * This assertion fails if the DSP
1087                                         * sends EOS more than once on this
1088                                         * channel.
1089                                         */
1090                                        if (pchnl->state & CHNL_STATEEOS)
1091                                                goto func_end;
1092                                        /*
1093                                         * Zero bytes indicates EOS. Update
1094                                         * IOC status for this chirp, and also
1095                                         * the channel state.
1096                                         */
1097                                        chnl_packet_obj->status |=
1098                                                CHNL_IOCSTATEOS;
1099                                        pchnl->state |= CHNL_STATEEOS;
1100                                        /*
1101                                         * Notify that end of stream has
1102                                         * occurred.
1103                                         */
1104                                        ntfy_notify(pchnl->ntfy_obj,
1105                                                        DSP_STREAMDONE);
1106                                }
1107                                /* Tell DSP if no more I/O buffers available */
1108                                if (list_empty(&pchnl->io_requests))
1109                                        set_chnl_free(sm, pchnl->chnl_id);
1110                                clear_chnl = true;
1111                                notify_client = true;
1112                        } else {
1113                                /*
1114                                 * Input full for this channel, but we have no
1115                                 * buffers available.  The channel must be
1116                                 * "idling". Clear out the physical input
1117                                 * channel.
1118                                 */
1119                                clear_chnl = true;
1120                        }
1121                } else {
1122                        /* Input channel cancelled: clear input channel */
1123                        clear_chnl = true;
1124                }
1125        } else {
1126                /* DPC fired after host closed channel: clear input channel */
1127                clear_chnl = true;
1128        }
1129        if (clear_chnl) {
1130                /* Indicate to the DSP we have read the input */
1131                sm->input_full = 0;
1132                sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1133        }
1134        if (notify_client) {
1135                /* Notify client with IO completion record */
1136                notify_chnl_complete(pchnl, chnl_packet_obj);
1137        }
1138func_end:
1139        return;
1140}
1141
1142/*
1143 *  ======== input_msg ========
1144 *      Copies messages from shared memory to the message queues.
1145 */
1146static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1147{
1148        u32 num_msgs;
1149        u32 i;
1150        u8 *msg_input;
1151        struct msg_queue *msg_queue_obj;
1152        struct msg_frame *pmsg;
1153        struct msg_dspmsg msg;
1154        struct msg_ctrl *msg_ctr_obj;
1155        u32 input_empty;
1156        u32 addr;
1157
1158        msg_ctr_obj = pio_mgr->msg_input_ctrl;
1159        /* Get the number of input messages to be read */
1160        input_empty = msg_ctr_obj->buf_empty;
1161        num_msgs = msg_ctr_obj->size;
1162        if (input_empty)
1163                return;
1164
1165        msg_input = pio_mgr->msg_input;
1166        for (i = 0; i < num_msgs; i++) {
1167                /* Read the next message */
1168                addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
1169                msg.msg.cmd =
1170                        read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1171                addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
1172                msg.msg.arg1 =
1173                        read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1174                addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
1175                msg.msg.arg2 =
1176                        read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1177                addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1178                msg.msgq_id =
1179                        read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1180                msg_input += sizeof(struct msg_dspmsg);
1181
1182                /* Determine which queue to put the message in */
1183                dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
1184                                "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
1185                                msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
1186                /*
1187                 * Interrupt may occur before shared memory and message
1188                 * input locations have been set up. If all nodes were
1189                 * cleaned up, hmsg_mgr->max_msgs should be 0.
1190                 */
1191                list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
1192                                list_elem) {
1193                        if (msg.msgq_id != msg_queue_obj->msgq_id)
1194                                continue;
1195                        /* Found it */
1196                        if (msg.msg.cmd == RMS_EXITACK) {
1197                                /*
1198                                 * Call the node exit notification.
1199                                 * The exit message does not get
1200                                 * queued.
1201                                 */
1202                                (*hmsg_mgr->on_exit)(msg_queue_obj->arg,
1203                                                msg.msg.arg1);
1204                                break;
1205                        }
1206                        /*
1207                         * Not an exit acknowledgement, queue
1208                         * the message.
1209                         */
1210                        if (list_empty(&msg_queue_obj->msg_free_list)) {
1211                                /*
1212                                 * No free frame to copy the
1213                                 * message into.
1214                                 */
1215                                pr_err("%s: no free msg frames,"
1216                                                " discarding msg\n",
1217                                                __func__);
1218                                break;
1219                        }
1220
1221                        pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
1222                                        struct msg_frame, list_elem);
1223                        list_del(&pmsg->list_elem);
1224                        pmsg->msg_data = msg;
1225                        list_add_tail(&pmsg->list_elem,
1226                                        &msg_queue_obj->msg_used_list);
1227                        ntfy_notify(msg_queue_obj->ntfy_obj,
1228                                        DSP_NODEMESSAGEREADY);
1229                        sync_set_event(msg_queue_obj->sync_event);
1230                }
1231        }
1232        /* Set the post SWI flag */
1233        if (num_msgs > 0) {
1234                /* Tell the DSP we've read the messages */
1235                msg_ctr_obj->buf_empty = true;
1236                msg_ctr_obj->post_swi = true;
1237                sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1238        }
1239}
1240
1241/*
1242 *  ======== notify_chnl_complete ========
1243 *  Purpose:
1244 *      Signal the channel event, notifying the client that I/O has completed.
1245 */
1246static void notify_chnl_complete(struct chnl_object *pchnl,
1247                                 struct chnl_irp *chnl_packet_obj)
1248{
1249        bool signal_event;
1250
1251        if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
1252                goto func_end;
1253
1254        /*
1255         * Note: we signal the channel event only if the queue of IO
1256         * completions is empty.  If it is not empty, the event is sure to be
1257         * signalled by the only IO completion list consumer:
1258         * bridge_chnl_get_ioc().
1259         */
1260        signal_event = list_empty(&pchnl->io_completions);
1261        /* Enqueue the IO completion info for the client */
1262        list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
1263        pchnl->cio_cs++;
1264
1265        if (pchnl->cio_cs > pchnl->chnl_packets)
1266                goto func_end;
1267        /* Signal the channel event (if not already set) that IO is complete */
1268        if (signal_event)
1269                sync_set_event(pchnl->sync_event);
1270
1271        /* Notify that IO is complete */
1272        ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1273func_end:
1274        return;
1275}
1276
1277/*
1278 *  ======== output_chnl ========
1279 *  Purpose:
1280 *      Dispatch a buffer on an output channel.
1281 */
1282static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1283                        u8 io_mode)
1284{
1285        struct chnl_mgr *chnl_mgr_obj;
1286        struct shm *sm;
1287        u32 chnl_id;
1288        struct chnl_irp *chnl_packet_obj;
1289        u32 dw_dsp_f_mask;
1290
1291        chnl_mgr_obj = pio_mgr->chnl_mgr;
1292        sm = pio_mgr->shared_mem;
1293        /* Attempt to perform output */
1294        if (sm->output_full)
1295                goto func_end;
1296
        if (pchnl && (pchnl->state & ~CHNL_STATEEOS) != CHNL_STATEREADY)
1298                goto func_end;
1299
1300        /* Look to see if both a PC and DSP output channel are ready */
1301        dw_dsp_f_mask = sm->dsp_free_mask;
1302        chnl_id =
1303            find_ready_output(chnl_mgr_obj, pchnl,
1304                              (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
1305        if (chnl_id == OUTPUTNOTREADY)
1306                goto func_end;
1307
1308        pchnl = chnl_mgr_obj->channels[chnl_id];
1309        if (!pchnl || list_empty(&pchnl->io_requests)) {
1310                /* Shouldn't get here */
1311                goto func_end;
1312        }
1313
1314        if (!pchnl->cio_reqs)
1315                goto func_end;
1316
1317        /* Get the I/O request, and attempt a transfer */
1318        chnl_packet_obj = list_first_entry(&pchnl->io_requests,
1319                        struct chnl_irp, link);
1320        list_del(&chnl_packet_obj->link);
1321
1322        pchnl->cio_reqs--;
1323
1324        /* Record fact that no more I/O buffers available */
1325        if (list_empty(&pchnl->io_requests))
1326                chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
1327
1328        /* Transfer buffer to DSP side */
1329        chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
1330                                        chnl_packet_obj->byte_size);
1331        memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1332                                        chnl_packet_obj->byte_size);
1333        pchnl->bytes_moved += chnl_packet_obj->byte_size;
1334        /* Write all 32 bits of arg */
1335        sm->arg = chnl_packet_obj->arg;
1336#if _CHNL_WORDSIZE == 2
        /* SM access may use a different word size (e.g. 16/32-bit words) */
1338        sm->output_id = (u16) chnl_id;
1339        sm->output_size = (u16) (chnl_packet_obj->byte_size +
1340                                chnl_mgr_obj->word_size - 1) /
1341                                (u16) chnl_mgr_obj->word_size;
1342#else
1343        sm->output_id = chnl_id;
1344        sm->output_size = (chnl_packet_obj->byte_size +
1345                        chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
1346#endif
        sm->output_full = 1;
1348        /* Indicate to the DSP we have written the output */
1349        sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1350        /* Notify client with IO completion record (keep EOS) */
1351        chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1352        notify_chnl_complete(pchnl, chnl_packet_obj);
1353        /* Notify if stream is done. */
1354        if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1355                ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1356
1357func_end:
1358        return;
1359}
1360
1361/*
1362 *  ======== output_msg ========
1363 *      Copies messages from the message queues to the shared memory.
1364 */
1365static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1366{
1367        u32 num_msgs = 0;
1368        u32 i;
1369        struct msg_dspmsg *msg_output;
1370        struct msg_frame *pmsg;
1371        struct msg_ctrl *msg_ctr_obj;
1372        u32 val;
1373        u32 addr;
1374
1375        msg_ctr_obj = pio_mgr->msg_output_ctrl;
1376
1377        /* Check if output has been cleared */
1378        if (!msg_ctr_obj->buf_empty)
1379                return;
1380
1381        num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1382                hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1383        msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;
1384
1385        /* Copy num_msgs messages into shared memory */
1386        for (i = 0; i < num_msgs; i++) {
1387                if (list_empty(&hmsg_mgr->msg_used_list))
1388                        continue;
1389
1390                pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
1391                                struct msg_frame, list_elem);
1392                list_del(&pmsg->list_elem);
1393
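                    /* Write the queue id and message words (cmd, arg1, arg2) to SM */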
1394                val = (pmsg->msg_data).msgq_id;
1395                addr = (u32) &msg_output->msgq_id;
1396                write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1397
1398                val = (pmsg->msg_data).msg.cmd;
1399                addr = (u32) &msg_output->msg.cmd;
1400                write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1401
1402                val = (pmsg->msg_data).msg.arg1;
1403                addr = (u32) &msg_output->msg.arg1;
1404                write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1405
1406                val = (pmsg->msg_data).msg.arg2;
1407                addr = (u32) &msg_output->msg.arg2;
1408                write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1409
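                    /*
                     * Advance to the next SM slot, return the frame to the
                     * free list and signal the message manager's sync event.
                     */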
1410                msg_output++;
1411                list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
1412                sync_set_event(hmsg_mgr->sync_event);
1413        }
1414
1415        if (num_msgs > 0) {
1416                hmsg_mgr->msgs_pending -= num_msgs;
1417#if _CHNL_WORDSIZE == 2
1418                /*
1419                 * Access can be different SM access word size
1420                 * (e.g. 16/32 bit words)
1421                 */
1422                msg_ctr_obj->size = (u16) num_msgs;
1423#else
1424                msg_ctr_obj->size = num_msgs;
1425#endif
1426                msg_ctr_obj->buf_empty = false;
1427                /* Set the post SWI flag */
1428                msg_ctr_obj->post_swi = true;
1429                /* Tell the DSP we have written the output. */
1430                sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1431        }
1432}
1433
1434/*
1435 *  ======== register_shm_segs ========
1436 *  Purpose:
1437 *      Registers GPP SM segment with CMM.
1438 */
1439static int register_shm_segs(struct io_mgr *hio_mgr,
1440                                    struct cod_manager *cod_man,
1441                                    u32 dw_gpp_base_pa)
1442{
1443        int status = 0;
1444        u32 ul_shm0_base = 0;
1445        u32 shm0_end = 0;
1446        u32 ul_shm0_rsrvd_start = 0;
1447        u32 ul_rsrvd_size = 0;
1448        u32 ul_gpp_phys;
1449        u32 ul_dsp_virt;
1450        u32 ul_shm_seg_id0 = 0;
1451        u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1452
1453        /*
1454         * Read address and size info for first SM region.
1455         * Get start of 1st SM Heap region.
1456         */
1457        status =
1458            cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
1459        if (ul_shm0_base == 0) {
1460                status = -EPERM;
1461                goto func_end;
1462        }
1463        /* Get end of 1st SM Heap region */
1464        if (!status) {
1465                /* Get start and length of message part of shared memory */
1466                status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1467                                           &shm0_end);
1468                if (shm0_end == 0) {
1469                        status = -EPERM;
1470                        goto func_end;
1471                }
1472        }
1473        /* Start of Gpp reserved region */
1474        if (!status) {
1475                /* Get start and length of message part of shared memory */
1476                status =
1477                    cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1478                                      &ul_shm0_rsrvd_start);
1479                if (ul_shm0_rsrvd_start == 0) {
1480                        status = -EPERM;
1481                        goto func_end;
1482                }
1483        }
1484        /* Register with CMM */
1485        if (!status) {
1486                status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
1487                if (!status) {
1488                        status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
1489                                                           CMM_ALLSEGMENTS);
1490                }
1491        }
1492        /* Register new SM region(s) */
1493        if (!status && (shm0_end - ul_shm0_base) > 0) {
1494                /* Calc size (bytes) of SM the GPP can alloc from */
1495                ul_rsrvd_size =
1496                    (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1497                if (ul_rsrvd_size <= 0) {
1498                        status = -EPERM;
1499                        goto func_end;
1500                }
1501                /* Calc size of SM DSP can alloc from */
1502                ul_dsp_size =
1503                    (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1504                if (ul_dsp_size <= 0) {
1505                        status = -EPERM;
1506                        goto func_end;
1507                }
1508                /* First TLB entry reserved for Bridge SM use. */
1509                ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
1510                /* Get size in bytes */
1511                ul_dsp_virt =
1512                    hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
1513                    hio_mgr->word_size;
1514                /*
1515                 * Calc byte offset used to convert GPP phys <-> DSP byte
1516                 * address.
1517                 */
1518                if (dw_gpp_base_pa > ul_dsp_virt)
1519                        dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1520                else
1521                        dw_offset = ul_dsp_virt - dw_gpp_base_pa;
1522
1523                if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1524                        status = -EPERM;
1525                        goto func_end;
1526                }
1527                /*
1528                 * Calc GPP virtual base of the SM region.
1529                 * This is actually an uncached kernel virtual address.
1530                 */
1531                dw_gpp_base_va =
1532                    ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1533                    ul_dsp_virt;
1534                /*
1535                 * Calc Gpp phys base of SM region.
1536                 * This is the physical address.
1537                 */
1538                dw_gpp_base_pa =
1539                    dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1540                    ul_dsp_virt;
1541                /* Register SM Segment 0. */
1542                status =
1543                    cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
1544                                           ul_rsrvd_size, dw_offset,
1545                                           (dw_gpp_base_pa >
1546                                            ul_dsp_virt) ? CMM_ADDTODSPPA :
1547                                           CMM_SUBFROMDSPPA,
1548                                           (u32) (ul_shm0_base *
1549                                                  hio_mgr->word_size),
1550                                           ul_dsp_size, &ul_shm_seg_id0,
1551                                           dw_gpp_base_va);
1552                /* First SM region is seg_id = 1 */
1553                if (ul_shm_seg_id0 != 1)
1554                        status = -EPERM;
1555        }
1556func_end:
1557        return status;
1558}
1559
1560/* ZCPY IO routines. */
1561/*
1562 *  ======== io_sh_msetting ========
1563 *      Sets the requested shm setting.
1564 */
1565int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1566{
1567#ifdef CONFIG_TIDSPBRIDGE_DVFS
1568        u32 i;
1569        struct dspbridge_platform_data *pdata =
1570            omap_dspbridge_dev->dev.platform_data;
1571
1572        switch (desc) {
1573        case SHM_CURROPP:
1574                /* Update the shared memory with requested OPP information */
1575                if (pargs != NULL)
1576                        hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1577                            *(u32 *) pargs;
1578                else
1579                        return -EPERM;
1580                break;
1581        case SHM_OPPINFO:
1582                /*
1583                 * Update the shared memory with the voltage, frequency,
1584                 * min and max frequency values for an OPP.
1585                 */
1586                for (i = 0; i <= dsp_max_opps; i++) {
1587                        hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1588                            voltage = vdd1_dsp_freq[i][0];
1589                        dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1590                                vdd1_dsp_freq[i][0]);
1591                        hio_mgr->shared_mem->opp_table_struct.
1592                            opp_point[i].frequency = vdd1_dsp_freq[i][1];
1593                        dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1594                                vdd1_dsp_freq[i][1]);
1595                        hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1596                            min_freq = vdd1_dsp_freq[i][2];
1597                        dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1598                                vdd1_dsp_freq[i][2]);
1599                        hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1600                            max_freq = vdd1_dsp_freq[i][3];
1601                        dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1602                                vdd1_dsp_freq[i][3]);
1603                }
1604                hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1605                    dsp_max_opps;
1606                dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1607                /* Update the current OPP number */
1608                if (pdata->dsp_get_opp)
1609                        i = (*pdata->dsp_get_opp) ();
1610                hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1611                dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1612                break;
1613        case SHM_GETOPP:
1614                /* Get the OPP that DSP has requested */
1615                *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1616                break;
1617        default:
1618                break;
1619        }
1620#endif
1621        return 0;
1622}
1623
1624/*
1625 *  ======== bridge_io_get_proc_load ========
1626 *      Gets the Processor's Load information
1627 */
1628int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1629                                struct dsp_procloadstat *proc_lstat)
1630{
1631        if (!hio_mgr->shared_mem)
1632                return -EFAULT;
1633
1634        proc_lstat->curr_load =
1635                        hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1636        proc_lstat->predicted_load =
1637            hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1638        proc_lstat->curr_dsp_freq =
1639            hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1640        proc_lstat->predicted_freq =
1641            hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1642
1643        dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1644                "Pred Freq = %d\n", proc_lstat->curr_load,
1645                proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
1646                proc_lstat->predicted_freq);
1647        return 0;
1648}
1649
1650
1651#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
1652void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1653{
1654        u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1655
1656        while (true) {
1657                /* Get the DSP current pointer */
1658                ul_gpp_cur_pointer =
1659                    *(u32 *) (hio_mgr->trace_buffer_current);
1660                ul_gpp_cur_pointer =
1661                    hio_mgr->gpp_va + (ul_gpp_cur_pointer -
1662                                          hio_mgr->dsp_va);
1663
1664                /* No new debug messages available yet */
1665                if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
1666                        break;
1667                } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
1668                        /* Continuous data */
1669                        ul_new_message_length =
1670                            ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
1671
1672                        memcpy(hio_mgr->msg,
1673                               (char *)hio_mgr->gpp_read_pointer,
1674                               ul_new_message_length);
1675                        hio_mgr->msg[ul_new_message_length] = '\0';
1676                        /*
1677                         * Advance the GPP trace pointer to DSP current
1678                         * pointer.
1679                         */
1680                        hio_mgr->gpp_read_pointer += ul_new_message_length;
1681                        /* Print the trace messages */
1682                        pr_info("DSPTrace: %s\n", hio_mgr->msg);
1683                } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
1684                        /* Handle trace buffer wraparound */
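                            /*
                             * First copy from the GPP read pointer up to the
                             * end of the trace ring buffer.
                             */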
1685                        memcpy(hio_mgr->msg,
1686                               (char *)hio_mgr->gpp_read_pointer,
1687                               hio_mgr->trace_buffer_end -
1688                               hio_mgr->gpp_read_pointer);
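                            /*
                             * Then copy the wrapped part: from the start of
                             * the ring up to the DSP's current write pointer.
                             */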
1689                        ul_new_message_length =
1690                            ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
1691                        memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
1692                                              hio_mgr->gpp_read_pointer],
1693                               (char *)hio_mgr->trace_buffer_begin,
1694                               ul_new_message_length);
1695                        hio_mgr->msg[hio_mgr->trace_buffer_end -
1696                                      hio_mgr->gpp_read_pointer +
1697                                      ul_new_message_length] = '\0';
1698                        /*
1699                         * Advance the GPP trace pointer to DSP current
1700                         * pointer.
1701                         */
1702                        hio_mgr->gpp_read_pointer =
1703                            hio_mgr->trace_buffer_begin +
1704                            ul_new_message_length;
1705                        /* Print the trace messages */
1706                        pr_info("DSPTrace: %s\n", hio_mgr->msg);
1707                }
1708        }
1709}
1710#endif
1711
1712#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1713/*
1714 *  ======== print_dsp_trace_buffer ========
1715 *      Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1716 *  Parameters:
1717 *    hbridge_context:   Handle to the Bridge driver's device
1718 *                      context.
1719 *  Returns:
1720 *      0:        Success.
1721 *      -ENOMEM:    Unable to allocate memory.
1722 *  Requires:
1723 *      hbridge_context must be valid. Checked in bridge_deh_notify.
1724 */
1725int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1726{
1727        int status = 0;
1728        struct cod_manager *cod_mgr;
1729        u32 ul_trace_end;
1730        u32 ul_trace_begin;
1731        u32 trace_cur_pos;
1732        u32 ul_num_bytes = 0;
1733        u32 ul_num_words = 0;
1734        u32 ul_word_size = 2;
1735        char *psz_buf;
1736        char *str_beg;
1737        char *trace_end;
1738        char *buf_end;
1739        char *new_line;
1740
1741        struct bridge_dev_context *pbridge_context = hbridge_context;
1742        struct bridge_drv_interface *intf_fxns;
1743        struct dev_object *dev_obj = (struct dev_object *)
1744            pbridge_context->dev_obj;
1745
1746        status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1747
1748        if (cod_mgr) {
1749                /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1750                status =
1751                    cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1752        } else {
1753                status = -EFAULT;
1754        }
1755        if (!status)
1756                status =
1757                    cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1758
1759        if (!status)
1760                /* trace_cur_pos will hold the address of a DSP pointer */
1761                status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1762                                                        &trace_cur_pos);
1763
1764        if (status)
1765                goto func_end;
1766
1767        ul_num_bytes = (ul_trace_end - ul_trace_begin);
1768
1769        ul_num_words = ul_num_bytes * ul_word_size;
1770        status = dev_get_intf_fxns(dev_obj, &intf_fxns);
1771
1772        if (status)
1773                goto func_end;
1774
1775        psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1776        if (psz_buf != NULL) {
1777                /* Read trace buffer data */
1778                status = (*intf_fxns->brd_read)(pbridge_context,
1779                        (u8 *)psz_buf, (u32)ul_trace_begin,
1780                        ul_num_bytes, 0);
1781
1782                if (status)
1783                        goto func_end;
1784
1785                /* Pack and do newline conversion */
1786                pr_debug("PrintDspTraceBuffer: "
1787                        "before pack and unpack.\n");
1788                pr_debug("%s: DSP Trace Buffer Begin:\n"
1789                        "=======================\n%s\n",
1790                        __func__, psz_buf);
1791
1792                /* Read the value at the DSP address in trace_cur_pos. */
1793                status = (*intf_fxns->brd_read)(pbridge_context,
1794                                (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1795                                4, 0);
1796                if (status)
1797                        goto func_end;
1798                /* Pack and do newline conversion */
1799                pr_info("DSP Trace Buffer Begin:\n"
1800                        "=======================\n%s\n",
1801                        psz_buf);
1802
1803
1804                /* convert to offset */
1805                trace_cur_pos = trace_cur_pos - ul_trace_begin;
1806
1807                if (ul_num_bytes) {
1808                        /*
1809                         * The buffer is not full; find the end of the
1810                         * data -- buf_end will be >= psz_buf after
1811                         * the loop below.
1812                         */
1813                        buf_end = &psz_buf[ul_num_bytes+1];
1814                        /* DSP print position */
1815                        trace_end = &psz_buf[trace_cur_pos];
1816
1817                        /*
1818                         * Search buffer for a new_line and replace it
1819                         * with '\0', then print as string.
1820                         * Continue until end of buffer is reached.
1821                         */
1822                        str_beg = trace_end;
1823                        ul_num_bytes = buf_end - str_beg;
1824
1825                        while (str_beg < buf_end) {
1826                                new_line = strnchr(str_beg, ul_num_bytes,
1827                                                                '\n');
1828                                if (new_line && new_line < buf_end) {
1829                                        *new_line = 0;
1830                                        pr_debug("%s\n", str_beg);
1831                                        str_beg = ++new_line;
1832                                        ul_num_bytes = buf_end - str_beg;
1833                                } else {
1834                                        /*
1835                                         * Assume buffer empty if it contains
1836                                         * a zero
1837                                         */
1838                                        if (*str_beg != '\0') {
1839                                                str_beg[ul_num_bytes] = 0;
1840                                                pr_debug("%s\n", str_beg);
1841                                        }
1842                                        str_beg = buf_end;
1843                                        ul_num_bytes = 0;
1844                                }
1845                        }
1846                        /*
1847                         * Search buffer for a new_line and replace it
1848                         * with '\0', then print as string.
1849                         * Continue until buffer is exhausted.
1850                         */
1851                        str_beg = psz_buf;
1852                        ul_num_bytes = trace_end - str_beg;
1853
1854                        while (str_beg < trace_end) {
1855                                new_line = strnchr(str_beg, ul_num_bytes, '\n');
1856                                if (new_line != NULL && new_line < trace_end) {
1857                                        *new_line = 0;
1858                                        pr_debug("%s\n", str_beg);
1859                                        str_beg = ++new_line;
1860                                        ul_num_bytes = trace_end - str_beg;
1861                                } else {
1862                                        /*
1863                                         * Assume buffer empty if it contains
1864                                         * a zero
1865                                         */
1866                                        if (*str_beg != '\0') {
1867                                                str_beg[ul_num_bytes] = 0;
1868                                                pr_debug("%s\n", str_beg);
1869                                        }
1870                                        str_beg = trace_end;
1871                                        ul_num_bytes = 0;
1872                                }
1873                        }
1874                }
1875                pr_info("\n=======================\n"
1876                        "DSP Trace Buffer End:\n");
1877                kfree(psz_buf);
1878        } else {
1879                status = -ENOMEM;
1880        }
1881func_end:
1882        if (status)
1883                dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
1884        return status;
1885}
1886
1887/**
1888 * dump_dsp_stack() - This function dumps the data on the DSP stack.
1889 * @bridge_context:     Bridge driver's device context pointer.
1890 *
1891 */
1892int dump_dsp_stack(struct bridge_dev_context *bridge_context)
1893{
1894        int status = 0;
1895        struct cod_manager *code_mgr;
1896        struct node_mgr *node_mgr;
1897        u32 trace_begin;
1898        char name[256];
1899        struct {
1900                u32 head[2];
1901                u32 size;
1902        } mmu_fault_dbg_info;
1903        u32 *buffer;
1904        u32 *buffer_beg;
1905        u32 *buffer_end;
1906        u32 exc_type;
1907        u32 dyn_ext_base;
1908        u32 i;
1909        u32 offset_output;
1910        u32 total_size;
1911        u32 poll_cnt;
1912        const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
1913                                "IRP", "NRP", "AMR", "SSR",
1914                                "ILC", "RILC", "IER", "CSR"};
1915        const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
1916        struct bridge_drv_interface *intf_fxns;
1917        struct dev_object *dev_object = bridge_context->dev_obj;
1918
1919        status = dev_get_cod_mgr(dev_object, &code_mgr);
1920        if (!code_mgr) {
1921                pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
1922                status = -EFAULT;
1923        }
1924
1925        if (!status) {
1926                status = dev_get_node_manager(dev_object, &node_mgr);
1927                if (!node_mgr) {
1928                        pr_debug("%s: Failed on dev_get_node_manager.\n",
1929                                                                __func__);
1930                        status = -EFAULT;
1931                }
1932        }
1933
1934        if (!status) {
1935                /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
1936                status =
1937                        cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
1938                pr_debug("%s: trace_begin Value 0x%x\n",
1939                        __func__, trace_begin);
1940                if (status)
1941                        pr_debug("%s: Failed on cod_get_sym_value.\n",
1942                                                                __func__);
1943        }
1944        if (!status)
1945                status = dev_get_intf_fxns(dev_object, &intf_fxns);
1946        /*
1947         * Check for the "magic number" in the trace buffer.  If it has
1948         * yet to appear then poll the trace buffer to wait for it.  Its
1949         * appearance signals that the DSP has finished dumping its state.
1950         */
1951        mmu_fault_dbg_info.head[0] = 0;
1952        mmu_fault_dbg_info.head[1] = 0;
1953        if (!status) {
1954                poll_cnt = 0;
1955                while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
1956                        mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
1957                        poll_cnt < POLL_MAX) {
1958
1959                        /* Read DSP dump size from the DSP trace buffer... */
1960                        status = (*intf_fxns->brd_read)(bridge_context,
1961                                (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
1962                                sizeof(mmu_fault_dbg_info), 0);
1963
1964                        if (status)
1965                                break;
1966
1967                        poll_cnt++;
1968                }
1969
1970                if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
1971                        mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
1972                        status = -ETIME;
1973                        pr_err("%s:No DSP MMU-Fault information available.\n",
1974                                                        __func__);
1975                }
1976        }
1977
1978        if (!status) {
1979                total_size = mmu_fault_dbg_info.size;
1980                /* Limit the size in case DSP went crazy */
1981                if (total_size > MAX_MMU_DBGBUFF)
1982                        total_size = MAX_MMU_DBGBUFF;
1983
1984                buffer = kzalloc(total_size, GFP_ATOMIC);
1985                if (!buffer) {
1986                        status = -ENOMEM;
1987                        pr_debug("%s: Failed to "
1988                                "allocate stack dump buffer.\n", __func__);
1989                        goto func_end;
1990                }
1991
1992                buffer_beg = buffer;
1993                buffer_end =  buffer + total_size / 4;
1994
1995                /* Read bytes from the DSP trace buffer... */
1996                status = (*intf_fxns->brd_read)(bridge_context,
1997                                (u8 *)buffer, (u32)trace_begin,
1998                                total_size, 0);
1999                if (status) {
2000                        pr_debug("%s: Failed to Read Trace Buffer.\n",
2001                                                                __func__);
2002                        goto func_end;
2003                }
2004
2005                pr_err("\nApproximate Crash Position:\n"
2006                        "--------------------------\n");
2007
2008                exc_type = buffer[3];
2009                if (!exc_type)
2010                        i = buffer[79];         /* IRP */
2011                else
2012                        i = buffer[80];         /* NRP */
2013
2014                status =
2015                    cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
2016                if (status) {
2017                        status = -EFAULT;
2018                        goto func_end;
2019                }
2020
2021                if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
2022                        0x1000, &offset_output, name) == 0))
2023                        pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
2024                                                        i - offset_output);
2025                else
2026                        pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
2027
2028                buffer += 4;
2029
2030                pr_err("\nExecution Info:\n"
2031                        "---------------\n");
2032
2033                if (*buffer < ARRAY_SIZE(exec_ctxt)) {
2034                        pr_err("Execution context \t%s\n",
2035                                exec_ctxt[*buffer++]);
2036                } else {
2037                        pr_err("Execution context corrupt\n");
2038                        kfree(buffer_beg);
2039                        return -EFAULT;
2040                }
2041                pr_err("Task Handle\t\t0x%x\n", *buffer++);
2042                pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
2043                pr_err("Stack Top\t\t0x%x\n", *buffer++);
2044                pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
2045                pr_err("Stack Size\t\t0x%x\n", *buffer++);
2046                pr_err("Stack Size In Use\t0x%x\n", *buffer++);
2047
2048                pr_err("\nCPU Registers\n"
2049                        "---------------\n");
2050
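                    /*
                     * A0-A31: A4/A6/A8 carry the first function arguments,
                     * A15 is labelled as the frame pointer.
                     */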
2051                for (i = 0; i < 32; i++) {
2052                        if (i == 4 || i == 6 || i == 8)
2053                                pr_err("A%d 0x%-8x [Function Argument %d]\n",
2054                                                        i, *buffer++, i-3);
2055                        else if (i == 15)
2056                                pr_err("A15 0x%-8x [Frame Pointer]\n",
2057                                                                *buffer++);
2058                        else
2059                                pr_err("A%d 0x%x\n", i, *buffer++);
2060                }
2061
2062                pr_err("\nB0 0x%x\n", *buffer++);
2063                pr_err("B1 0x%x\n", *buffer++);
2064                pr_err("B2 0x%x\n", *buffer++);
2065
2066                if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2067                        *buffer, 0x1000, &offset_output, name) == 0))
2068
2069                        pr_err("B3 0x%-8x [Function Return Pointer:"
2070                                " \"%s\" + 0x%x]\n", *buffer, name,
2071                                *buffer - offset_output);
2072                else
2073                        pr_err("B3 0x%-8x [Function Return Pointer:"
2074                                "Unable to match to a symbol.]\n", *buffer);
2075
2076                buffer++;
2077
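                    /*
                     * B4-B31: B4/B6/B8 carry function arguments, B14 is
                     * labelled as the data page pointer.
                     */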
2078                for (i = 4; i < 32; i++) {
2079                        if (i == 4 || i == 6 || i == 8)
2080                                pr_err("B%d 0x%-8x [Function Argument %d]\n",
2081                                                        i, *buffer++, i-2);
2082                        else if (i == 14)
2083                                pr_err("B14 0x%-8x [Data Page Pointer]\n",
2084                                                                *buffer++);
2085                        else
2086                                pr_err("B%d 0x%x\n", i, *buffer++);
2087                }
2088
2089                pr_err("\n");
2090
2091                for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2092                        pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
2093
2094                pr_err("\nStack:\n"
2095                        "------\n");
2096
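                    /*
                     * Walk the rest of the dump as stack words; values above
                     * dyn_ext_base that node_find_addr can resolve are shown
                     * as symbol + offset.
                     */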
2097                for (i = 0; buffer < buffer_end; i++, buffer++) {
2098                        if ((*buffer > dyn_ext_base) && (
2099                                node_find_addr(node_mgr, *buffer, 0x600,
2100                                &offset_output, name) == 0))
2101                                pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2102                                        i, *buffer, name,
2103                                        *buffer - offset_output);
2104                        else
2105                                pr_err("[%d] 0x%x\n", i, *buffer);
2106                }
2107                kfree(buffer_beg);
2108        }
2109func_end:
2110        return status;
2111}
2112
2113/**
2114 * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side
2115 * @bridge_context:             Bridge driver's device context pointer.
2116 *
2117 */
2118void dump_dl_modules(struct bridge_dev_context *bridge_context)
2119{
2120        struct cod_manager *code_mgr;
2121        struct bridge_drv_interface *intf_fxns;
2122        struct bridge_dev_context *bridge_ctxt = bridge_context;
2123        struct dev_object *dev_object = bridge_ctxt->dev_obj;
2124        struct modules_header modules_hdr;
2125        struct dll_module *module_struct = NULL;
2126        u32 module_dsp_addr;
2127        u32 module_size;
2128        u32 module_struct_size = 0;
2129        u32 sect_ndx;
2130        char *sect_str;
2131        int status = 0;
2132
2133        status = dev_get_intf_fxns(dev_object, &intf_fxns);
2134        if (status) {
2135                pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2136                goto func_end;
2137        }
2138
2139        status = dev_get_cod_mgr(dev_object, &code_mgr);
2140        if (!code_mgr) {
2141                pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2142                status = -EFAULT;
2143                goto func_end;
2144        }
2145
2146        /* Look up the address of the modules_header structure */
2147        status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2148        if (status) {
2149                pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2150                        __func__);
2151                goto func_end;
2152        }
2153
2154        pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2155
2156        /* Copy the modules_header structure from DSP memory. */
2157        status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
2158                                (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2159
2160        if (status) {
2161                pr_debug("%s: Failed to read modules header.\n",
2162                                                                __func__);
2163                goto func_end;
2164        }
2165
2166        module_dsp_addr = modules_hdr.first_module;
2167        module_size = modules_hdr.first_module_size;
2168
2169        pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2170                                                                module_size);
2171
2172        pr_err("\nDynamically Loaded Modules:\n"
2173                "---------------------------\n");
2174
2175        /* For each dll_module structure in the list... */
2176        while (module_size) {
2177                /*
2178                 * Allocate/re-allocate memory to hold the dll_module
2179                 * structure. The memory is re-allocated only if the existing
2180                 * allocation is too small.
2181                 */
2182                if (module_size > module_struct_size) {
2183                        kfree(module_struct);
2184                        module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2185                        module_struct_size = module_size+128;
2186                        pr_debug("%s: allocated module struct %p %d\n",
2187                                __func__, module_struct, module_struct_size);
2188                        if (!module_struct)
2189                                goto func_end;
2190                }
2191                /* Copy the dll_module structure from DSP memory */
2192                status = (*intf_fxns->brd_read)(bridge_context,
2193                        (u8 *)module_struct, module_dsp_addr, module_size, 0);
2194
2195                if (status) {
2196                        pr_debug(
2197                        "%s: Failed to read dll_module struct for 0x%x.\n",
2198                        __func__, module_dsp_addr);
2199                        break;
2200                }
2201
2202                /* Update info regarding the _next_ module in the list. */
2203                module_dsp_addr = module_struct->next_module;
2204                module_size = module_struct->next_module_size;
2205
2206                pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2207                        __func__, module_dsp_addr, module_size,
2208                        module_struct->num_sects);
2209
2210                /*
2211                 * The section name strings start immediately following
2212                 * the array of dll_sect structures.
2213                 */
2214                sect_str = (char *) &module_struct->
2215                                        sects[module_struct->num_sects];
2216                pr_err("%s\n", sect_str);
2217
2218                /*
2219                 * Advance to the first section name string.
2220                 * Each string follows the one before.
2221                 */
2222                sect_str += strlen(sect_str) + 1;
2223
2224                /* Access each dll_sect structure and its name string. */
2225                for (sect_ndx = 0;
2226                        sect_ndx < module_struct->num_sects; sect_ndx++) {
2227                        pr_err("    Section: 0x%x ",
2228                                module_struct->sects[sect_ndx].sect_load_adr);
2229
2230                        if (((u32) sect_str - (u32) module_struct) <
2231                                module_struct_size) {
2232                                pr_err("%s\n", sect_str);
2233                                /* Each string follows the one before. */
2234                                sect_str += strlen(sect_str)+1;
2235                        } else {
2236                                pr_err("<string error>\n");
2237                                pr_debug("%s: section name string address "
2238                                        "is invalid %p\n", __func__, sect_str);
2239                        }
2240                }
2241        }
2242func_end:
2243        kfree(module_struct);
2244}
2245#endif
2246