/* linux/drivers/staging/most/hdm-dim2/dim2_hal.c */
/*
 * dim2_hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */

#include "dim2_hal.h"
#include "dim2_errors.h"
#include "dim2_reg.h"
#include <linux/stddef.h>
#include <linux/kernel.h>

/*
 * Size factor for isochronous DBR buffer.
 * Minimal value is 3.
 */
#define ISOC_DBR_FACTOR 3u

/*
 * Number of 32-bit units for DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * Min allocated space is block size.
 * Max possible allocated space is 32 blocks.
 */
#define DBR_MAP_SIZE 2

/* -------------------------------------------------------------------------- */
/* not configurable area */

/* CTR base addresses of the descriptor and allocation tables */
#define CDT 0x00
#define ADT 0x40
#define MLB_CAT 0x80
#define AHB_CAT 0x88

#define DBR_SIZE  (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)

/* round x up to the next multiple of d */
#define ROUND_UP_TO(x, d)  (DIV_ROUND_UP(x, (d)) * (d))
  54
  55/* -------------------------------------------------------------------------- */
  56/* generic helper functions and macros */
  57
/* Return a u32 with only the bit at @position set. */
static inline u32 bit_mask(u8 position)
{
        return (u32)1 << position;
}
  62
/*
 * Report an error through the platform callback and return false, so
 * failure paths can simply "return dim_on_error(...);".
 */
static inline bool dim_on_error(u8 error_id, const char *error_message)
{
        dimcb_on_error(error_id, error_message);
        return false;
}
  68
  69/* -------------------------------------------------------------------------- */
  70/* types and local variables */
  71
/*
 * Bookkeeping for the async TX channel's DBR fill level.  The size of
 * each enqueued buffer is remembered in sz_queue, indexed by the wrapped
 * write packet counter, so it can be credited back to rest_size once the
 * hardware read packet counter advances past it.
 */
struct async_tx_dbr {
        u8 ch_addr;                     /* traced channel; 0 = tracking off */
        u16 rpc;                        /* software copy of read packet counter */
        u16 wpc;                        /* write packet counter for enqueues */
        u16 rest_size;                  /* free DBR bytes left for this channel */
        u16 sz_queue[CDT0_RPC_MASK + 1];
};
  79
/* Module-wide HAL state; there is a single instance, g, below. */
struct lld_global_vars_t {
        bool dim_is_initialized;        /* set by dim_startup(), cleared on shutdown */
        bool mcm_is_initialized;        /* not referenced in this file */
        struct dim2_regs __iomem *dim2; /* DIM2 core base address */
        struct async_tx_dbr atx_dbr;    /* async TX DBR fill tracker */
        u32 fcnt;                       /* FCNT value written to MLBC0; also used
                                         * as a shift in sync buffer sizing */
        u32 dbr_map[DBR_MAP_SIZE];      /* DBR allocation bitmap, 1 bit per block */
};
  88
  89static struct lld_global_vars_t g = { false };
  90
  91/* -------------------------------------------------------------------------- */
  92
  93static int dbr_get_mask_size(u16 size)
  94{
  95        int i;
  96
  97        for (i = 0; i < 6; i++)
  98                if (size <= (DBR_BLOCK_SIZE << i))
  99                        return 1 << i;
 100        return 0;
 101}
 102
/**
 * Allocates DBR memory.
 *
 * Scans the dbr_map bitmap for a free run of blocks large enough for
 * @size bytes.  The search steps by the power-of-two granule returned
 * by dbr_get_mask_size(), so allocations are naturally aligned.
 *
 * @param size Allocating memory size.
 * @return Offset in DBR memory by success or DBR_SIZE if out of memory.
 */
static int alloc_dbr(u16 size)
{
        int mask_size;
        int i, block_idx = 0;

        if (size <= 0)
                return DBR_SIZE; /* out of memory */

        mask_size = dbr_get_mask_size(size);
        if (mask_size == 0)
                return DBR_SIZE; /* out of memory */

        for (i = 0; i < DBR_MAP_SIZE; i++) {
                /* contiguous bit mask covering the needed block count */
                u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
                u32 mask = ~((~(u32)0) << blocks);

                do {
                        if ((g.dbr_map[i] & mask) == 0) {
                                g.dbr_map[i] |= mask;
                                return block_idx * DBR_BLOCK_SIZE;
                        }
                        /* advance by one aligned allocation granule */
                        block_idx += mask_size;
                        /* do shift left with 2 steps in case mask_size == 32 */
                        mask <<= mask_size - 1;
                } while ((mask <<= 1) != 0);
        }

        return DBR_SIZE; /* out of memory */
}
 137
/* Release a DBR region previously obtained from alloc_dbr(). */
static void free_dbr(int offs, int size)
{
        int block_idx = offs / DBR_BLOCK_SIZE;
        u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
        u32 mask = ~((~(u32)0) << blocks);

        /* clear the allocation bits in the bitmap word covering the region */
        mask <<= block_idx % 32;
        g.dbr_map[block_idx / 32] &= ~mask;
}
 147
 148/* -------------------------------------------------------------------------- */
 149
/*
 * Kick a CTR/DBR transfer by writing MADR, busy-wait until the DIM
 * reports completion in MCTL bit 0, then clear the completion flag.
 */
static void dim2_transfer_madr(u32 val)
{
        dimcb_io_write(&g.dim2->MADR, val);

        /* wait for transfer completion */
        while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
                continue;

        dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
}
 160
/* Zero-fill DBR memory from @addr for @size units, one MADR write each. */
static void dim2_clear_dbr(u16 addr, u16 size)
{
        enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };

        u16 const end_addr = addr + size;
        u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);

        dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
        dimcb_io_write(&g.dim2->MDAT0, 0);  /* data written to every location */

        for (; addr < end_addr; addr++)
                dim2_transfer_madr(cmd | addr);
}
 174
/* Read one 32-bit word (@mdat_idx: 0..3) of the CTR entry at @ctr_addr. */
static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
        dim2_transfer_madr(ctr_addr);

        return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
}
 181
/*
 * Masked write of one 128-bit CTR entry: only bits set in @mask are
 * updated with the corresponding bits of @value.  Order matters: the
 * MDATx data registers are loaded first, then the MDWEx write-enable
 * masks, and finally the transfer is started via MADR with WNR set.
 */
static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
        enum { MADR_WNR_BIT = 31 };

        dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */

        /* skip data loads for fully-masked-out words */
        if (mask[0] != 0)
                dimcb_io_write(&g.dim2->MDAT0, value[0]);
        if (mask[1] != 0)
                dimcb_io_write(&g.dim2->MDAT1, value[1]);
        if (mask[2] != 0)
                dimcb_io_write(&g.dim2->MDAT2, value[2]);
        if (mask[3] != 0)
                dimcb_io_write(&g.dim2->MDAT3, value[3]);

        dimcb_io_write(&g.dim2->MDWE0, mask[0]);
        dimcb_io_write(&g.dim2->MDWE1, mask[1]);
        dimcb_io_write(&g.dim2->MDWE2, mask[2]);
        dimcb_io_write(&g.dim2->MDWE3, mask[3]);

        dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}
 204
 205static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
 206{
 207        u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
 208
 209        dim2_write_ctr_mask(ctr_addr, mask, value);
 210}
 211
 212static inline void dim2_clear_ctr(u32 ctr_addr)
 213{
 214        u32 const value[4] = { 0, 0, 0, 0 };
 215
 216        dim2_write_ctr(ctr_addr, value);
 217}
 218
 219static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
 220                               bool read_not_write)
 221{
 222        bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
 223        bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
 224        u16 const cat =
 225                (read_not_write << CAT_RNW_BIT) |
 226                (ch_type << CAT_CT_SHIFT) |
 227                (ch_addr << CAT_CL_SHIFT) |
 228                (isoc_fce << CAT_FCE_BIT) |
 229                (sync_mfe << CAT_MFE_BIT) |
 230                (false << CAT_MT_BIT) |
 231                (true << CAT_CE_BIT);
 232        u8 const ctr_addr = cat_base + ch_addr / 8;
 233        u8 const idx = (ch_addr % 8) / 2;
 234        u8 const shift = (ch_addr % 2) * 16;
 235        u32 mask[4] = { 0, 0, 0, 0 };
 236        u32 value[4] = { 0, 0, 0, 0 };
 237
 238        mask[idx] = (u32)0xFFFF << shift;
 239        value[idx] = cat << shift;
 240        dim2_write_ctr_mask(ctr_addr, mask, value);
 241}
 242
 243static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
 244{
 245        u8 const ctr_addr = cat_base + ch_addr / 8;
 246        u8 const idx = (ch_addr % 8) / 2;
 247        u8 const shift = (ch_addr % 2) * 16;
 248        u32 mask[4] = { 0, 0, 0, 0 };
 249        u32 value[4] = { 0, 0, 0, 0 };
 250
 251        mask[idx] = (u32)0xFFFF << shift;
 252        dim2_write_ctr_mask(ctr_addr, mask, value);
 253}
 254
/*
 * Program the channel descriptor (CDT) entry: buffer depth and DBR base
 * address, plus the isochronous block size when @packet_length != 0.
 */
static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
                               u16 packet_length)
{
        u32 cdt[4] = { 0, 0, 0, 0 };

        if (packet_length)
                cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

        /* hardware fields hold value-minus-one */
        cdt[3] =
                ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
                (dbr_address << CDT3_BA_SHIFT);
        dim2_write_ctr(CDT + ch_addr, cdt);
}
 268
 269static u16 dim2_rpc(u8 ch_addr)
 270{
 271        u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
 272
 273        return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
 274}
 275
 276static void dim2_clear_cdt(u8 ch_addr)
 277{
 278        u32 cdt[4] = { 0, 0, 0, 0 };
 279
 280        dim2_write_ctr(CDT + ch_addr, cdt);
 281}
 282
 283static void dim2_configure_adt(u8 ch_addr)
 284{
 285        u32 adt[4] = { 0, 0, 0, 0 };
 286
 287        adt[0] =
 288                (true << ADT0_CE_BIT) |
 289                (true << ADT0_LE_BIT) |
 290                (0 << ADT0_PG_BIT);
 291
 292        dim2_write_ctr(ADT + ch_addr, adt);
 293}
 294
 295static void dim2_clear_adt(u8 ch_addr)
 296{
 297        u32 adt[4] = { 0, 0, 0, 0 };
 298
 299        dim2_write_ctr(ADT + ch_addr, adt);
 300}
 301
/*
 * Arm ADT double-buffer slot @idx (0 or 1) of a control/async channel:
 * set the packet-start, ready and buffer-depth fields for that slot in
 * ADT word 1, and the slot's buffer address word (2 or 3).
 */
static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
                                  u16 buffer_size)
{
        u8 const shift = idx * 16;  /* slot 1 uses the upper half-word */

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_PS_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_PS_BIT + shift)) |
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
 324
/*
 * Arm ADT double-buffer slot @idx of an isochronous/synchronous channel.
 * Same as dim2_start_ctrl_async() but without the packet-start bit.
 */
static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
                                 u16 buffer_size)
{
        u8 const shift = idx * 16;  /* slot 1 uses the upper half-word */

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
 345
 346static void dim2_clear_ctram(void)
 347{
 348        u32 ctr_addr;
 349
 350        for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
 351                dim2_clear_ctr(ctr_addr);
 352}
 353
/*
 * Full channel bring-up: CDT, MLB-side CAT, ADT, AHB-side CAT.  Note
 * the direction flag is inverted between the MLB and AHB CAT entries.
 */
static void dim2_configure_channel(
        u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
        u16 packet_length)
{
        dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
        dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);

        dim2_configure_adt(ch_addr);
        dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);

        /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
        dimcb_io_write(&g.dim2->ACMR0,
                       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
}
 368
/* Channel tear-down: mask its interrupt, then clear CATs, ADT and CDT. */
static void dim2_clear_channel(u8 ch_addr)
{
        /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
        dimcb_io_write(&g.dim2->ACMR0,
                       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));

        /* AHB side first, then MLB side */
        dim2_clear_cat(AHB_CAT, ch_addr);
        dim2_clear_adt(ch_addr);

        dim2_clear_cat(MLB_CAT, ch_addr);
        dim2_clear_cdt(ch_addr);

        /* clear channel status bit */
        dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
}
 384
 385/* -------------------------------------------------------------------------- */
 386/* trace async tx dbr fill state */
 387
/* Wrap a software packet counter into the hardware RPC field range. */
static inline u16 norm_pc(u16 pc)
{
        return pc & CDT0_RPC_MASK;
}
 392
 393static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
 394{
 395        g.atx_dbr.rest_size = dbr_size;
 396        g.atx_dbr.rpc = dim2_rpc(ch_addr);
 397        g.atx_dbr.wpc = g.atx_dbr.rpc;
 398}
 399
/* Account one enqueued TX buffer: reserve its bytes, remember its size. */
static void dbrcnt_enq(int buf_sz)
{
        g.atx_dbr.rest_size -= buf_sz;
        g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
        g.atx_dbr.wpc++;
}
 406
/*
 * Return the free DBR byte count of the traced async TX channel, or
 * 0xFFFF when @ch is not the traced channel.  Sizes of buffers the
 * hardware has consumed since the last call are credited back first.
 */
u16 dim_dbr_space(struct dim_channel *ch)
{
        u16 cur_rpc;
        struct async_tx_dbr *dbr = &g.atx_dbr;

        if (ch->addr != dbr->ch_addr)
                return 0xFFFF;

        cur_rpc = dim2_rpc(ch->addr);

        /* retire completed buffers up to the hardware read counter */
        while (norm_pc(dbr->rpc) != cur_rpc) {
                dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
                dbr->rpc++;
        }

        /* counter window nearly full: report no space */
        if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
                return 0;

        return dbr->rest_size;
}
 427
 428/* -------------------------------------------------------------------------- */
 429/* channel state helpers */
 430
 431static void state_init(struct int_ch_state *state)
 432{
 433        state->request_counter = 0;
 434        state->service_counter = 0;
 435
 436        state->idx1 = 0;
 437        state->idx2 = 0;
 438        state->level = 0;
 439}
 440
 441/* -------------------------------------------------------------------------- */
 442/* macro helper functions */
 443
 444static inline bool check_channel_address(u32 ch_address)
 445{
 446        return ch_address > 0 && (ch_address % 2) == 0 &&
 447               (ch_address / 2) <= (u32)CAT_CL_MASK;
 448}
 449
 450static inline bool check_packet_length(u32 packet_length)
 451{
 452        u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
 453
 454        if (packet_length <= 0)
 455                return false; /* too small */
 456
 457        if (packet_length > max_size)
 458                return false; /* too big */
 459
 460        if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
 461                return false; /* too big */
 462
 463        return true;
 464}
 465
 466static inline bool check_bytes_per_frame(u32 bytes_per_frame)
 467{
 468        u16 const bd_factor = g.fcnt + 2;
 469        u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
 470
 471        if (bytes_per_frame <= 0)
 472                return false; /* too small */
 473
 474        if (bytes_per_frame > max_size)
 475                return false; /* too big */
 476
 477        return true;
 478}
 479
 480static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
 481{
 482        u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
 483
 484        if (buf_size > max_size)
 485                return max_size;
 486
 487        return buf_size;
 488}
 489
 490static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
 491{
 492        u16 n;
 493        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
 494
 495        if (buf_size > max_size)
 496                buf_size = max_size;
 497
 498        n = buf_size / packet_length;
 499
 500        if (n < 2u)
 501                return 0; /* too small buffer for given packet_length */
 502
 503        return packet_length * n;
 504}
 505
 506static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
 507{
 508        u16 n;
 509        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
 510        u32 const unit = bytes_per_frame << g.fcnt;
 511
 512        if (buf_size > max_size)
 513                buf_size = max_size;
 514
 515        n = buf_size / unit;
 516
 517        if (n < 1u)
 518                return 0; /* too small buffer for given bytes_per_frame */
 519
 520        return unit * n;
 521}
 522
/* Quiesce the DIM: disable MediaLB, wipe CTR RAM, clear and mask IRQs. */
static void dim2_cleanup(void)
{
        /* disable MediaLB */
        dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);

        dim2_clear_ctram();

        /* disable mlb_int interrupt */
        dimcb_io_write(&g.dim2->MIEN, 0);

        /* clear status for all dma channels */
        dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
        dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);

        /* mask interrupts for all channels */
        dimcb_io_write(&g.dim2->ACMR0, 0);
        dimcb_io_write(&g.dim2->ACMR1, 0);
}
 541
/*
 * Reset the DIM and bring it up: enable MediaLB with the given clock
 * and pin configuration, activate all HBI channels, enable the HBI,
 * and set the DMA mode.
 */
static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
        dim2_cleanup();

        /* configure and enable MediaLB */
        dimcb_io_write(&g.dim2->MLBC0,
                       enable_6pin << MLBC0_MLBPEN_BIT |
                       mlb_clock << MLBC0_MLBCLK_SHIFT |
                       g.fcnt << MLBC0_FCNT_SHIFT |
                       true << MLBC0_MLBEN_BIT);

        /* activate all HBI channels */
        dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
        dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);

        /* enable HBI */
        dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));

        /* configure DMA */
        dimcb_io_write(&g.dim2->ACTL,
                       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
                       true << ACTL_SCE_BIT);
}
 565
/*
 * Query MediaLB lock state.  MLBC1 is rewritten with only its NDA field
 * preserved, which clears the sticky clock/lock error bits; lock is then
 * reported when neither error re-asserts and MLBLK is set in MLBC0.
 */
static bool dim2_is_mlb_locked(void)
{
        u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
        u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
                          bit_mask(MLBC1_LOCKERR_BIT);
        u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
        u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

        dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
        return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
               (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
}
 578
 579/* -------------------------------------------------------------------------- */
 580/* channel help routines */
 581
/*
 * Service one ADT double-buffer slot: if its done bit is set, clear the
 * done/error/ready bits and acknowledge the channel status interrupt.
 * Returns false when the slot has not completed yet.
 */
static inline bool service_channel(u8 ch_addr, u8 idx)
{
        u8 const shift = idx * 16;  /* slot 1 uses the upper half-word */
        u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt_w[4] = { 0, 0, 0, 0 };

        if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
                return false;

        mask[1] =
                bit_mask(ADT1_DNE_BIT + shift) |
                bit_mask(ADT1_ERR_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift);
        dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);

        /* clear channel status bit */
        dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));

        return true;
}
 603
 604/* -------------------------------------------------------------------------- */
 605/* channel init routines */
 606
 607static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
 608{
 609        state_init(&ch->state);
 610
 611        ch->addr = ch_addr;
 612
 613        ch->packet_length = packet_length;
 614        ch->bytes_per_frame = 0;
 615        ch->done_sw_buffers_number = 0;
 616}
 617
 618static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
 619{
 620        state_init(&ch->state);
 621
 622        ch->addr = ch_addr;
 623
 624        ch->packet_length = 0;
 625        ch->bytes_per_frame = bytes_per_frame;
 626        ch->done_sw_buffers_number = 0;
 627}
 628
 629static void channel_init(struct dim_channel *ch, u8 ch_addr)
 630{
 631        state_init(&ch->state);
 632
 633        ch->addr = ch_addr;
 634
 635        ch->packet_length = 0;
 636        ch->bytes_per_frame = 0;
 637        ch->done_sw_buffers_number = 0;
 638}
 639
 640/* returns true if channel interrupt state is cleared */
 641static bool channel_service_interrupt(struct dim_channel *ch)
 642{
 643        struct int_ch_state *const state = &ch->state;
 644
 645        if (!service_channel(ch->addr, state->idx2))
 646                return false;
 647
 648        state->idx2 ^= 1;
 649        state->request_counter++;
 650        return true;
 651}
 652
/*
 * Validate and enqueue one DMA buffer on the channel's next ADT slot.
 * The buffer size must already be normalized for the channel's data
 * type.  Returns false (after reporting the error) on a bad size or
 * when both double-buffer slots are in use.
 */
static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
        struct int_ch_state *const state = &ch->state;

        if (buf_size <= 0)
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

        /* control/async channel: both per-type sizes are zero */
        if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
            buf_size != norm_ctrl_async_buffer_size(buf_size))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad control/async buffer size");

        if (ch->packet_length &&
            buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad isochronous buffer size");

        if (ch->bytes_per_frame &&
            buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad synchronous buffer size");

        /* both double-buffer slots busy */
        if (state->level >= 2u)
                return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

        ++state->level;

        /* account DBR usage before the hardware can start consuming */
        if (ch->addr == g.atx_dbr.ch_addr)
                dbrcnt_enq(buf_size);

        if (ch->packet_length || ch->bytes_per_frame)
                dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
        else
                dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
                                      buf_size);
        state->idx1 ^= 1;

        return true;
}
 692
 693static u8 channel_service(struct dim_channel *ch)
 694{
 695        struct int_ch_state *const state = &ch->state;
 696
 697        if (state->service_counter != state->request_counter) {
 698                state->service_counter++;
 699                if (state->level == 0)
 700                        return DIM_ERR_UNDERFLOW;
 701
 702                --state->level;
 703                ch->done_sw_buffers_number++;
 704        }
 705
 706        return DIM_NO_ERROR;
 707}
 708
 709static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
 710{
 711        if (buffers_number > ch->done_sw_buffers_number)
 712                return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
 713
 714        ch->done_sw_buffers_number -= buffers_number;
 715        return true;
 716}
 717
 718/* -------------------------------------------------------------------------- */
 719/* API */
 720
/*
 * One-time DIM initialization: validate the parameters, reset the
 * module state and bring up the MediaLB hardware.
 */
u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
               u32 fcnt)
{
        g.dim_is_initialized = false;

        if (!dim_base_address)
                return DIM_INIT_ERR_DIM_ADDR;

        /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
        /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
        if (mlb_clock >= 8)
                return DIM_INIT_ERR_MLB_CLOCK;

        /* NOTE(review): reuses the clock error code for a bad fcnt */
        if (fcnt > MLBC0_FCNT_MAX_VAL)
                return DIM_INIT_ERR_MLB_CLOCK;

        g.dim2 = dim_base_address;
        g.fcnt = fcnt;
        g.dbr_map[0] = 0;
        g.dbr_map[1] = 0;

        /* clocks of 3072 fs and above need the 6-pin interface */
        dim2_initialize(mlb_clock >= 3, mlb_clock);

        g.dim_is_initialized = true;

        return DIM_NO_ERROR;
}
 748
/* Mark the DIM uninitialized and quiesce the hardware. */
void dim_shutdown(void)
{
        g.dim_is_initialized = false;
        dim2_cleanup();
}
 754
/* Public wrapper: report whether MediaLB is locked. */
bool dim_get_lock_state(void)
{
        return dim2_is_mlb_locked();
}
 759
/*
 * Common init for control and async channels: validate the address,
 * allocate DBR space rounded up to whole blocks, and program the
 * channel tables.
 */
static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
                          u16 ch_address, u16 hw_buffer_size)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        /* hardware channel number is half the MediaLB address */
        channel_init(ch, ch_address / 2);

        dim2_configure_channel(ch->addr, type, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}
 781
/* Acknowledge MLB interrupts by clearing both MLB status registers. */
void dim_service_mlb_int_irq(void)
{
        dimcb_io_write(&g.dim2->MS0, 0);
        dimcb_io_write(&g.dim2->MS1, 0);
}
 787
/* Public wrapper: largest valid control/async buffer size <= @buf_size. */
u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
        return norm_ctrl_async_buffer_size(buf_size);
}
 792
 793/**
 794 * Retrieves maximal possible correct buffer size for isochronous data type
 795 * conform to given packet length and not bigger than given buffer size.
 796 *
 797 * Returns non-zero correct buffer size or zero by error.
 798 */
 799u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
 800{
 801        if (!check_packet_length(packet_length))
 802                return 0;
 803
 804        return norm_isoc_buffer_size(buf_size, packet_length);
 805}
 806
 807/**
 808 * Retrieves maximal possible correct buffer size for synchronous data type
 809 * conform to given bytes per frame and not bigger than given buffer size.
 810 *
 811 * Returns non-zero correct buffer size or zero by error.
 812 */
 813u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
 814{
 815        if (!check_bytes_per_frame(bytes_per_frame))
 816                return 0;
 817
 818        return norm_sync_buffer_size(buf_size, bytes_per_frame);
 819}
 820
/* Set up a control channel; thin wrapper around init_ctrl_async(). */
u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                    u16 max_buffer_size)
{
        return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
                               max_buffer_size);
}
 827
/*
 * Set up an async channel.  The first async TX channel additionally
 * becomes the DBR-fill traced channel (see dim_dbr_space()).
 */
u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                  u16 max_buffer_size)
{
        u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
                                 max_buffer_size);

        if (is_tx && !g.atx_dbr.ch_addr) {
                g.atx_dbr.ch_addr = ch->addr;
                dbrcnt_init(ch->addr, ch->dbr_size);
                /* bit 20 presumably enables the async-tx related MLB
                 * interrupt in MIEN -- TODO confirm against DIM2 spec */
                dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
        }

        return ret;
}
 842
/*
 * Set up an isochronous channel: validate, allocate ISOC_DBR_FACTOR
 * packets worth of DBR space, and program the channel tables.
 */
u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 packet_length)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_packet_length(packet_length))
                return DIM_ERR_BAD_CONFIG;

        ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        isoc_init(ch, ch_address / 2, packet_length);

        dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
                               ch->dbr_size, packet_length);

        return DIM_NO_ERROR;
}
 867
/*
 * Set up a synchronous channel: validate, allocate DBR space sized by
 * bytes_per_frame << (fcnt + 2), zero the DBR region, and program the
 * channel tables.
 */
u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 bytes_per_frame)
{
        u16 bd_factor = g.fcnt + 2;

        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_bytes_per_frame(bytes_per_frame))
                return DIM_ERR_BAD_CONFIG;

        ch->dbr_size = bytes_per_frame << bd_factor;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        sync_init(ch, ch_address / 2, bytes_per_frame);

        /* sync channels start from a zeroed DBR region */
        dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
        dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}
 895
 896u8 dim_destroy_channel(struct dim_channel *ch)
 897{
 898        if (!g.dim_is_initialized || !ch)
 899                return DIM_ERR_DRIVER_NOT_INITIALIZED;
 900
 901        if (ch->addr == g.atx_dbr.ch_addr) {
 902                dimcb_io_write(&g.dim2->MIEN, 0);
 903                g.atx_dbr.ch_addr = 0;
 904        }
 905
 906        dim2_clear_channel(ch->addr);
 907        if (ch->dbr_addr < DBR_SIZE)
 908                free_dbr(ch->dbr_addr, ch->dbr_size);
 909        ch->dbr_addr = DBR_SIZE;
 910
 911        return DIM_NO_ERROR;
 912}
 913
/*
 * AHB interrupt service: walk the NULL-terminated channel array and
 * service every completed buffer until a full pass finds nothing new.
 */
void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
        bool state_changed;

        if (!g.dim_is_initialized) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                             "DIM is not initialized");
                return;
        }

        if (!channels) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
                return;
        }

        /*
         * Use while-loop and a flag to make sure the age is changed back at
         * least once, otherwise the interrupt may never come if CPU generates
         * interrupt on changing age.
         * This cycle runs not more than number of channels, because
         * channel_service_interrupt() routine doesn't start the channel again.
         */
        do {
                struct dim_channel *const *ch = channels;

                state_changed = false;

                while (*ch) {
                        state_changed |= channel_service_interrupt(*ch);
                        ++ch;
                }
        } while (state_changed);
}
 947
 948u8 dim_service_channel(struct dim_channel *ch)
 949{
 950        if (!g.dim_is_initialized || !ch)
 951                return DIM_ERR_DRIVER_NOT_INITIALIZED;
 952
 953        return channel_service(ch);
 954}
 955
 956struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
 957                                             struct dim_ch_state_t *state_ptr)
 958{
 959        if (!ch || !state_ptr)
 960                return NULL;
 961
 962        state_ptr->ready = ch->state.level < 2;
 963        state_ptr->done_buffers = ch->done_sw_buffers_number;
 964
 965        return state_ptr;
 966}
 967
/* Public wrapper for channel_start(); rejects a NULL channel. */
bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
                        u16 buffer_size)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_start(ch, buffer_addr, buffer_size);
}
 977
/* Public wrapper for channel_detach_buffers(); rejects a NULL channel. */
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_detach_buffers(ch, buffers_number);
}
 986