/* linux/drivers/staging/most/dim2/hal.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * hal.c - DIM2 HAL implementation
   4 * (MediaLB, Device Interface Macro IP, OS62420)
   5 *
   6 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
   7 */
   8
   9/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
  10
  11#include "hal.h"
  12#include "errors.h"
  13#include "reg.h"
  14#include <linux/stddef.h>
  15#include <linux/kernel.h>
  16#include <linux/io.h>
  17
  18/*
  19 * Size factor for isochronous DBR buffer.
  20 * Minimal value is 3.
  21 */
  22#define ISOC_DBR_FACTOR 3u
  23
  24/*
  25 * Number of 32-bit units for DBR map.
  26 *
  27 * 1: block size is 512, max allocation is 16K
  28 * 2: block size is 256, max allocation is 8K
  29 * 4: block size is 128, max allocation is 4K
  30 * 8: block size is 64, max allocation is 2K
  31 *
  32 * Min allocated space is block size.
  33 * Max possible allocated space is 32 blocks.
  34 */
  35#define DBR_MAP_SIZE 2
  36
  37/* -------------------------------------------------------------------------- */
  38/* not configurable area */
  39
  40#define CDT 0x00
  41#define ADT 0x40
  42#define MLB_CAT 0x80
  43#define AHB_CAT 0x88
  44
  45#define DBR_SIZE  (16 * 1024) /* specified by IP */
  46#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
  47
  48#define ROUND_UP_TO(x, d)  (DIV_ROUND_UP(x, (d)) * (d))
  49
  50/* -------------------------------------------------------------------------- */
  51/* generic helper functions and macros */
  52
  53static inline u32 bit_mask(u8 position)
  54{
  55        return (u32)1 << position;
  56}
  57
/*
 * Report an error through the platform callback and signal failure.
 * Always returns false so callers can write "return dim_on_error(...);".
 */
static inline bool dim_on_error(u8 error_id, const char *error_message)
{
        dimcb_on_error(error_id, error_message);
        return false;
}
  63
  64/* -------------------------------------------------------------------------- */
  65/* types and local variables */
  66
/* Bookkeeping for the DBR fill level of the asynchronous TX channel. */
struct async_tx_dbr {
        u8 ch_addr;     /* address of the traced async TX channel, 0 if none */
        u16 rpc;        /* software copy of the hardware read packet counter */
        u16 wpc;        /* write packet counter, incremented per queued buffer */
        u16 rest_size;  /* remaining free DBR bytes for this channel */
        u16 sz_queue[CDT0_RPC_MASK + 1]; /* per-packet sizes, indexed by wpc */
};
  74
/* Global state of the HAL; there is a single DIM2 instance. */
struct lld_global_vars_t {
        bool dim_is_initialized;        /* set by dim_startup() */
        bool mcm_is_initialized;
        struct dim2_regs __iomem *dim2; /* DIM2 core base address */
        struct async_tx_dbr atx_dbr;    /* async TX DBR fill-state tracing */
        u32 fcnt;                       /* frames-per-subbuffer exponent */
        u32 dbr_map[DBR_MAP_SIZE];      /* DBR allocation bitmap, 1 bit/block */
};

static struct lld_global_vars_t g = { false };
  85
  86/* -------------------------------------------------------------------------- */
  87
  88static int dbr_get_mask_size(u16 size)
  89{
  90        int i;
  91
  92        for (i = 0; i < 6; i++)
  93                if (size <= (DBR_BLOCK_SIZE << i))
  94                        return 1 << i;
  95        return 0;
  96}
  97
/**
 * alloc_dbr() - Allocates DBR memory.
 * @size: Allocating memory size.
 * Returns: Offset in DBR memory by success or DBR_SIZE if out of memory.
 *
 * Scans the dbr_map bitmap for a free run of blocks.  Candidate positions
 * are probed at a stride of mask_size blocks (the power-of-two rounding of
 * the request), so allocations stay aligned to their rounded size.
 */
static int alloc_dbr(u16 size)
{
        int mask_size;
        int i, block_idx = 0;

        if (size <= 0)
                return DBR_SIZE; /* out of memory */

        mask_size = dbr_get_mask_size(size);
        if (mask_size == 0)
                return DBR_SIZE; /* out of memory */

        for (i = 0; i < DBR_MAP_SIZE; i++) {
                /* bitmask with one bit per block actually needed */
                u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
                u32 mask = ~((~(u32)0) << blocks);

                do {
                        if ((g.dbr_map[i] & mask) == 0) {
                                g.dbr_map[i] |= mask;
                                return block_idx * DBR_BLOCK_SIZE;
                        }
                        block_idx += mask_size;
                        /* do shift left with 2 steps in case mask_size == 32 */
                        mask <<= mask_size - 1;
                } while ((mask <<= 1) != 0);
        }

        return DBR_SIZE; /* out of memory */
}
 132
 133static void free_dbr(int offs, int size)
 134{
 135        int block_idx = offs / DBR_BLOCK_SIZE;
 136        u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
 137        u32 mask = ~((~(u32)0) << blocks);
 138
 139        mask <<= block_idx % 32;
 140        g.dbr_map[block_idx / 32] &= ~mask;
 141}
 142
 143/* -------------------------------------------------------------------------- */
 144
/*
 * Start a CTR transfer by writing @val to MADR, then busy-wait until the
 * hardware signals completion in MCTL bit 0 and clear that bit again.
 */
static void dim2_transfer_madr(u32 val)
{
        writel(val, &g.dim2->MADR);

        /* wait for transfer completion */
        while ((readl(&g.dim2->MCTL) & 1) != 1)
                continue;

        writel(0, &g.dim2->MCTL);   /* clear transfer complete */
}
 155
/* Zero @size DBR words starting at @addr via single-word CTR writes. */
static void dim2_clear_dbr(u16 addr, u16 size)
{
        enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };

        u16 const end_addr = addr + size;
        /* write command targeting the data buffer (TB) */
        u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);

        writel(0, &g.dim2->MCTL);   /* clear transfer complete */
        writel(0, &g.dim2->MDAT0);  /* value written to every address */

        for (; addr < end_addr; addr++)
                dim2_transfer_madr(cmd | addr);
}
 169
/* Read one 32-bit word (@mdat_idx selects MDAT0..3) of CTR entry @ctr_addr. */
static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
        dim2_transfer_madr(ctr_addr);

        return readl((&g.dim2->MDAT0) + mdat_idx);
}
 176
/*
 * Write the 128-bit CTR entry @ctr_addr: only bits set in @mask are
 * modified (via the MDWE write-enable registers) with bits from @value.
 * MDATx registers are loaded before the MADR write triggers the transfer.
 */
static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
        enum { MADR_WNR_BIT = 31 };

        writel(0, &g.dim2->MCTL);   /* clear transfer complete */

        /* only load data words that can take effect */
        if (mask[0] != 0)
                writel(value[0], &g.dim2->MDAT0);
        if (mask[1] != 0)
                writel(value[1], &g.dim2->MDAT1);
        if (mask[2] != 0)
                writel(value[2], &g.dim2->MDAT2);
        if (mask[3] != 0)
                writel(value[3], &g.dim2->MDAT3);

        writel(mask[0], &g.dim2->MDWE0);
        writel(mask[1], &g.dim2->MDWE1);
        writel(mask[2], &g.dim2->MDWE2);
        writel(mask[3], &g.dim2->MDWE3);

        dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}
 199
 200static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
 201{
 202        u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
 203
 204        dim2_write_ctr_mask(ctr_addr, mask, value);
 205}
 206
 207static inline void dim2_clear_ctr(u32 ctr_addr)
 208{
 209        u32 const value[4] = { 0, 0, 0, 0 };
 210
 211        dim2_write_ctr(ctr_addr, value);
 212}
 213
/*
 * Program the 16-bit Channel Allocation Table entry for channel @ch_addr.
 * @cat_base selects MLB_CAT or AHB_CAT; FCE is set only for isochronous
 * channels and MFE only for synchronous ones.  CAT entries are packed
 * 8 per CTR line, 2 per 32-bit word, hence the idx/shift arithmetic.
 */
static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
                               bool read_not_write)
{
        bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
        bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
        u16 const cat =
                (read_not_write << CAT_RNW_BIT) |
                (ch_type << CAT_CT_SHIFT) |
                (ch_addr << CAT_CL_SHIFT) |
                (isoc_fce << CAT_FCE_BIT) |
                (sync_mfe << CAT_MFE_BIT) |
                (false << CAT_MT_BIT) |
                (true << CAT_CE_BIT);
        u8 const ctr_addr = cat_base + ch_addr / 8;
        u8 const idx = (ch_addr % 8) / 2;
        u8 const shift = (ch_addr % 2) * 16;
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 value[4] = { 0, 0, 0, 0 };

        mask[idx] = (u32)0xFFFF << shift;
        value[idx] = cat << shift;
        dim2_write_ctr_mask(ctr_addr, mask, value);
}
 237
 238static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
 239{
 240        u8 const ctr_addr = cat_base + ch_addr / 8;
 241        u8 const idx = (ch_addr % 8) / 2;
 242        u8 const shift = (ch_addr % 2) * 16;
 243        u32 mask[4] = { 0, 0, 0, 0 };
 244        u32 value[4] = { 0, 0, 0, 0 };
 245
 246        mask[idx] = (u32)0xFFFF << shift;
 247        dim2_write_ctr_mask(ctr_addr, mask, value);
 248}
 249
/*
 * Program the Channel Descriptor Table entry: buffer depth and DBR base
 * address in word 3; for isochronous channels (@packet_length != 0) the
 * block size (packet length - 1) goes into word 1.
 */
static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
                               u16 packet_length)
{
        u32 cdt[4] = { 0, 0, 0, 0 };

        if (packet_length)
                cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

        cdt[3] =
                ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
                (dbr_address << CDT3_BA_SHIFT);
        dim2_write_ctr(CDT + ch_addr, cdt);
}
 263
 264static u16 dim2_rpc(u8 ch_addr)
 265{
 266        u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
 267
 268        return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
 269}
 270
 271static void dim2_clear_cdt(u8 ch_addr)
 272{
 273        u32 cdt[4] = { 0, 0, 0, 0 };
 274
 275        dim2_write_ctr(CDT + ch_addr, cdt);
 276}
 277
 278static void dim2_configure_adt(u8 ch_addr)
 279{
 280        u32 adt[4] = { 0, 0, 0, 0 };
 281
 282        adt[0] =
 283                (true << ADT0_CE_BIT) |
 284                (true << ADT0_LE_BIT) |
 285                (0 << ADT0_PG_BIT);
 286
 287        dim2_write_ctr(ADT + ch_addr, adt);
 288}
 289
 290static void dim2_clear_adt(u8 ch_addr)
 291{
 292        u32 adt[4] = { 0, 0, 0, 0 };
 293
 294        dim2_write_ctr(ADT + ch_addr, adt);
 295}
 296
/*
 * Arm ADT double-buffer slot @idx (0 or 1) of a control/async channel:
 * set packet-start and ready flags plus the buffer depth in ADT word 1,
 * and the DMA buffer address in word 2 (idx 0) or word 3 (idx 1).
 */
static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
                                  u16 buffer_size)
{
        u8 const shift = idx * 16;  /* slot 1 flags live in the upper half */

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_PS_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_PS_BIT + shift)) |
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
 319
/*
 * Arm ADT double-buffer slot @idx (0 or 1) of an isoc/sync channel.
 * Like dim2_start_ctrl_async() but without the packet-start flag, which
 * only applies to control/async traffic.
 */
static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
                                 u16 buffer_size)
{
        u8 const shift = idx * 16;  /* slot 1 flags live in the upper half */

        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt[4] = { 0, 0, 0, 0 };

        mask[1] =
                bit_mask(ADT1_RDY_BIT + shift) |
                (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
        adt[1] =
                (true << (ADT1_RDY_BIT + shift)) |
                ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

        mask[idx + 2] = 0xFFFFFFFF;
        adt[idx + 2] = buf_addr;

        dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}
 340
 341static void dim2_clear_ctram(void)
 342{
 343        u32 ctr_addr;
 344
 345        for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
 346                dim2_clear_ctr(ctr_addr);
 347}
 348
/*
 * Fully configure a channel: CDT, MLB-side CAT, ADT and AHB-side CAT.
 * The MLB and AHB CAT entries have opposite read/write direction, and the
 * channel's AHB interrupt is unmasked at the end.
 */
static void dim2_configure_channel(
        u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
        u16 packet_length)
{
        dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
        dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);

        dim2_configure_adt(ch_addr);
        dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);

        /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
        writel(readl(&g.dim2->ACMR0) | bit_mask(ch_addr), &g.dim2->ACMR0);
}
 362
/*
 * Tear down a channel in reverse order of dim2_configure_channel():
 * mask its interrupt, clear AHB-side then MLB-side table entries, and
 * finally acknowledge any pending channel status bit.
 */
static void dim2_clear_channel(u8 ch_addr)
{
        /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
        writel(readl(&g.dim2->ACMR0) & ~bit_mask(ch_addr), &g.dim2->ACMR0);

        dim2_clear_cat(AHB_CAT, ch_addr);
        dim2_clear_adt(ch_addr);

        dim2_clear_cat(MLB_CAT, ch_addr);
        dim2_clear_cdt(ch_addr);

        /* clear channel status bit */
        writel(bit_mask(ch_addr), &g.dim2->ACSR0);
}
 377
 378/* -------------------------------------------------------------------------- */
 379/* trace async tx dbr fill state */
 380
/* Reduce a packet counter to the hardware counter range (wrap-around). */
static inline u16 norm_pc(u16 pc)
{
        return pc & CDT0_RPC_MASK;
}
 385
 386static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
 387{
 388        g.atx_dbr.rest_size = dbr_size;
 389        g.atx_dbr.rpc = dim2_rpc(ch_addr);
 390        g.atx_dbr.wpc = g.atx_dbr.rpc;
 391}
 392
 393static void dbrcnt_enq(int buf_sz)
 394{
 395        g.atx_dbr.rest_size -= buf_sz;
 396        g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
 397        g.atx_dbr.wpc++;
 398}
 399
/*
 * Return the free DBR space of the traced async TX channel, reclaiming
 * the space of packets the hardware has consumed since the last call
 * (rpc catches up with the hardware counter).  Returns 0xFFFF for
 * untraced channels and 0 while the size queue is (nearly) full.
 */
u16 dim_dbr_space(struct dim_channel *ch)
{
        u16 cur_rpc;
        struct async_tx_dbr *dbr = &g.atx_dbr;

        if (ch->addr != dbr->ch_addr)
                return 0xFFFF;

        cur_rpc = dim2_rpc(ch->addr);

        /* release sizes of all packets consumed by the hardware */
        while (norm_pc(dbr->rpc) != cur_rpc) {
                dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
                dbr->rpc++;
        }

        if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
                return 0;

        return dbr->rest_size;
}
 420
 421/* -------------------------------------------------------------------------- */
 422/* channel state helpers */
 423
 424static void state_init(struct int_ch_state *state)
 425{
 426        state->request_counter = 0;
 427        state->service_counter = 0;
 428
 429        state->idx1 = 0;
 430        state->idx2 = 0;
 431        state->level = 0;
 432}
 433
 434/* -------------------------------------------------------------------------- */
 435/* macro helper functions */
 436
 437static inline bool check_channel_address(u32 ch_address)
 438{
 439        return ch_address > 0 && (ch_address % 2) == 0 &&
 440               (ch_address / 2) <= (u32)CAT_CL_MASK;
 441}
 442
 443static inline bool check_packet_length(u32 packet_length)
 444{
 445        u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
 446
 447        if (packet_length <= 0)
 448                return false; /* too small */
 449
 450        if (packet_length > max_size)
 451                return false; /* too big */
 452
 453        if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
 454                return false; /* too big */
 455
 456        return true;
 457}
 458
 459static inline bool check_bytes_per_frame(u32 bytes_per_frame)
 460{
 461        u16 const bd_factor = g.fcnt + 2;
 462        u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
 463
 464        if (bytes_per_frame <= 0)
 465                return false; /* too small */
 466
 467        if (bytes_per_frame > max_size)
 468                return false; /* too big */
 469
 470        return true;
 471}
 472
 473u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
 474{
 475        u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
 476
 477        if (buf_size > max_size)
 478                return max_size;
 479
 480        return buf_size;
 481}
 482
 483static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
 484{
 485        u16 n;
 486        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
 487
 488        if (buf_size > max_size)
 489                buf_size = max_size;
 490
 491        n = buf_size / packet_length;
 492
 493        if (n < 2u)
 494                return 0; /* too small buffer for given packet_length */
 495
 496        return packet_length * n;
 497}
 498
 499static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
 500{
 501        u16 n;
 502        u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
 503        u32 const unit = bytes_per_frame << g.fcnt;
 504
 505        if (buf_size > max_size)
 506                buf_size = max_size;
 507
 508        n = buf_size / unit;
 509
 510        if (n < 1u)
 511                return 0; /* too small buffer for given bytes_per_frame */
 512
 513        return unit * n;
 514}
 515
/* Disable MediaLB, wipe all CTR tables and quiesce every interrupt source. */
static void dim2_cleanup(void)
{
        /* disable MediaLB */
        writel(false << MLBC0_MLBEN_BIT, &g.dim2->MLBC0);

        dim2_clear_ctram();

        /* disable mlb_int interrupt */
        writel(0, &g.dim2->MIEN);

        /* clear status for all dma channels */
        writel(0xFFFFFFFF, &g.dim2->ACSR0);
        writel(0xFFFFFFFF, &g.dim2->ACSR1);

        /* mask interrupts for all channels */
        writel(0, &g.dim2->ACMR0);
        writel(0, &g.dim2->ACMR1);
}
 534
/*
 * Bring the DIM2 core up from a clean state: enable MediaLB with the
 * given clock and pin configuration, activate the HBI channels and set
 * the AHB DMA mode.  @enable_6pin selects the 6-pin MediaLB interface.
 */
static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
        dim2_cleanup();

        /* configure and enable MediaLB */
        writel(enable_6pin << MLBC0_MLBPEN_BIT |
               mlb_clock << MLBC0_MLBCLK_SHIFT |
               g.fcnt << MLBC0_FCNT_SHIFT |
               true << MLBC0_MLBEN_BIT,
               &g.dim2->MLBC0);

        /* activate all HBI channels */
        writel(0xFFFFFFFF, &g.dim2->HCMR0);
        writel(0xFFFFFFFF, &g.dim2->HCMR1);

        /* enable HBI */
        writel(bit_mask(HCTL_EN_BIT), &g.dim2->HCTL);

        /* configure DMA */
        writel(ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
               true << ACTL_SCE_BIT, &g.dim2->ACTL);
}
 557
/*
 * Check whether MediaLB is locked to the network: no clock/lock errors in
 * MLBC1 and the lock bit set in MLBC0.  The sticky error bits are cleared
 * first (preserving only the node-address field) so a stale error does
 * not mask a current lock.
 */
static bool dim2_is_mlb_locked(void)
{
        u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
        u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
                          bit_mask(MLBC1_LOCKERR_BIT);
        u32 const c1 = readl(&g.dim2->MLBC1);
        u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

        writel(c1 & nda_mask, &g.dim2->MLBC1);
        return (readl(&g.dim2->MLBC1) & mask1) == 0 &&
               (readl(&g.dim2->MLBC0) & mask0) != 0;
}
 570
 571/* -------------------------------------------------------------------------- */
 572/* channel help routines */
 573
/*
 * Service ADT double-buffer slot @idx of channel @ch_addr.  Returns false
 * if that slot has no "done" indication; otherwise clears the done/error/
 * ready flags of the slot and the channel's AHB status bit, and returns
 * true.
 */
static inline bool service_channel(u8 ch_addr, u8 idx)
{
        u8 const shift = idx * 16;  /* slot 1 flags live in the upper half */
        u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
        u32 mask[4] = { 0, 0, 0, 0 };
        u32 adt_w[4] = { 0, 0, 0, 0 };

        if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
                return false;

        /* write zeros through the mask to clear DNE, ERR and RDY */
        mask[1] =
                bit_mask(ADT1_DNE_BIT + shift) |
                bit_mask(ADT1_ERR_BIT + shift) |
                bit_mask(ADT1_RDY_BIT + shift);
        dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);

        /* clear channel status bit */
        writel(bit_mask(ch_addr), &g.dim2->ACSR0);

        return true;
}
 595
 596/* -------------------------------------------------------------------------- */
 597/* channel init routines */
 598
 599static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
 600{
 601        state_init(&ch->state);
 602
 603        ch->addr = ch_addr;
 604
 605        ch->packet_length = packet_length;
 606        ch->bytes_per_frame = 0;
 607        ch->done_sw_buffers_number = 0;
 608}
 609
 610static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
 611{
 612        state_init(&ch->state);
 613
 614        ch->addr = ch_addr;
 615
 616        ch->packet_length = 0;
 617        ch->bytes_per_frame = bytes_per_frame;
 618        ch->done_sw_buffers_number = 0;
 619}
 620
 621static void channel_init(struct dim_channel *ch, u8 ch_addr)
 622{
 623        state_init(&ch->state);
 624
 625        ch->addr = ch_addr;
 626
 627        ch->packet_length = 0;
 628        ch->bytes_per_frame = 0;
 629        ch->done_sw_buffers_number = 0;
 630}
 631
/* returns true if channel interrupt state is cleared */
static bool channel_service_interrupt(struct dim_channel *ch)
{
        struct int_ch_state *const state = &ch->state;

        if (!service_channel(ch->addr, state->idx2))
                return false;

        /* advance to the other double-buffer slot and record completion */
        state->idx2 ^= 1;
        state->request_counter++;
        return true;
}
 644
/*
 * Queue one hardware buffer on @ch.  The size must already be normalized
 * for the channel type; at most two buffers (the two ADT slots) may be
 * outstanding.  Returns false (after reporting) on any validation error.
 */
static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
        struct int_ch_state *const state = &ch->state;

        if (buf_size <= 0)
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

        /* packet_length == 0 && bytes_per_frame == 0 means control/async */
        if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
            buf_size != dim_norm_ctrl_async_buffer_size(buf_size))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad control/async buffer size");

        if (ch->packet_length &&
            buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad isochronous buffer size");

        if (ch->bytes_per_frame &&
            buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
                return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
                                    "Bad synchronous buffer size");

        if (state->level >= 2u)
                return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

        ++state->level;

        /* trace DBR fill level for the async TX channel */
        if (ch->addr == g.atx_dbr.ch_addr)
                dbrcnt_enq(buf_size);

        if (ch->packet_length || ch->bytes_per_frame)
                dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
        else
                dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
                                      buf_size);
        state->idx1 ^= 1;   /* next buffer goes to the other ADT slot */

        return true;
}
 684
/*
 * Consume one completed hardware buffer if the interrupt side has
 * signalled one (request_counter ahead of service_counter).  Returns
 * DIM_ERR_UNDERFLOW if a completion arrives with no buffer outstanding.
 */
static u8 channel_service(struct dim_channel *ch)
{
        struct int_ch_state *const state = &ch->state;

        if (state->service_counter != state->request_counter) {
                state->service_counter++;
                if (state->level == 0)
                        return DIM_ERR_UNDERFLOW;

                --state->level;
                ch->done_sw_buffers_number++;
        }

        return DIM_NO_ERROR;
}
 700
 701static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
 702{
 703        if (buffers_number > ch->done_sw_buffers_number)
 704                return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
 705
 706        ch->done_sw_buffers_number -= buffers_number;
 707        return true;
 708}
 709
 710/* -------------------------------------------------------------------------- */
 711/* API */
 712
/*
 * Initialize the HAL and the DIM2 core.  @mlb_clock selects the MediaLB
 * speed (see table below); clocks >= 3 (1024 fs and up) require the
 * 6-pin interface.  @fcnt is the frames-per-subbuffer exponent.
 * Returns DIM_NO_ERROR or a DIM_INIT_ERR_* code.
 */
u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
               u32 fcnt)
{
        g.dim_is_initialized = false;

        if (!dim_base_address)
                return DIM_INIT_ERR_DIM_ADDR;

        /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
        /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
        if (mlb_clock >= 8)
                return DIM_INIT_ERR_MLB_CLOCK;

        if (fcnt > MLBC0_FCNT_MAX_VAL)
                return DIM_INIT_ERR_MLB_CLOCK;

        g.dim2 = dim_base_address;
        g.fcnt = fcnt;
        g.dbr_map[0] = 0;   /* whole DBR memory is free */
        g.dbr_map[1] = 0;

        dim2_initialize(mlb_clock >= 3, mlb_clock);

        g.dim_is_initialized = true;

        return DIM_NO_ERROR;
}
 740
/* Mark the HAL uninitialized and quiesce the hardware. */
void dim_shutdown(void)
{
        g.dim_is_initialized = false;
        dim2_cleanup();
}
 746
/* Return true if MediaLB is currently locked to the network. */
bool dim_get_lock_state(void)
{
        return dim2_is_mlb_locked();
}
 751
/*
 * Common initialization for control and async channels: validate the
 * address, allocate DBR space (rounded up to whole blocks unless a size
 * was pre-set by the caller) and program the hardware tables.
 */
static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
                          u16 ch_address, u16 hw_buffer_size)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!ch->dbr_size)
                ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        /* hardware channel number is half the MediaLB channel address */
        channel_init(ch, ch_address / 2);

        dim2_configure_channel(ch->addr, type, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}
 774
/* Acknowledge the MediaLB interrupt by clearing both status registers. */
void dim_service_mlb_int_irq(void)
{
        writel(0, &g.dim2->MS0);
        writel(0, &g.dim2->MS1);
}
 780
/*
 * Retrieves maximal possible correct buffer size for isochronous data type
 * conform to given packet length and not bigger than given buffer size.
 *
 * Returns non-zero correct buffer size or zero by error.
 */
u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
        if (!check_packet_length(packet_length))
                return 0;

        return norm_isoc_buffer_size(buf_size, packet_length);
}
 794
/*
 * Retrieves maximal possible correct buffer size for synchronous data type
 * conform to given bytes per frame and not bigger than given buffer size.
 *
 * Returns non-zero correct buffer size or zero by error.
 */
u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
        if (!check_bytes_per_frame(bytes_per_frame))
                return 0;

        return norm_sync_buffer_size(buf_size, bytes_per_frame);
}
 808
 809u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
 810                    u16 max_buffer_size)
 811{
 812        return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
 813                               max_buffer_size);
 814}
 815
/*
 * Initialize an async channel.  The first async TX channel additionally
 * gets its DBR fill level traced (for dim_dbr_space()) and enables the
 * mlb_int used for that tracing (MIEN bit 20).
 */
u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                  u16 max_buffer_size)
{
        u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
                                 max_buffer_size);

        if (is_tx && !g.atx_dbr.ch_addr) {
                g.atx_dbr.ch_addr = ch->addr;
                dbrcnt_init(ch->addr, ch->dbr_size);
                writel(bit_mask(20), &g.dim2->MIEN);
        }

        return ret;
}
 830
/*
 * Initialize an isochronous channel.  DBR space defaults to
 * packet_length * ISOC_DBR_FACTOR unless the caller pre-set dbr_size.
 * Returns DIM_NO_ERROR or a DIM_*_ERR code.
 */
u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 packet_length)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_packet_length(packet_length))
                return DIM_ERR_BAD_CONFIG;

        if (!ch->dbr_size)
                ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        isoc_init(ch, ch_address / 2, packet_length);

        dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
                               ch->dbr_size, packet_length);

        return DIM_NO_ERROR;
}
 856
/*
 * Initialize a synchronous channel.  DBR space defaults to
 * bytes_per_frame << (fcnt + 2) frames worth of data and is zeroed
 * before use (sync data is streamed, not packetized).
 */
u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
                 u16 bytes_per_frame)
{
        u16 bd_factor = g.fcnt + 2;

        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (!check_channel_address(ch_address))
                return DIM_INIT_ERR_CHANNEL_ADDRESS;

        if (!check_bytes_per_frame(bytes_per_frame))
                return DIM_ERR_BAD_CONFIG;

        if (!ch->dbr_size)
                ch->dbr_size = bytes_per_frame << bd_factor;
        ch->dbr_addr = alloc_dbr(ch->dbr_size);
        if (ch->dbr_addr >= DBR_SIZE)
                return DIM_INIT_ERR_OUT_OF_MEMORY;

        sync_init(ch, ch_address / 2, bytes_per_frame);

        dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
        dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
                               ch->dbr_addr, ch->dbr_size, 0);

        return DIM_NO_ERROR;
}
 885
/*
 * Tear down a channel: stop DBR tracing if this was the traced async TX
 * channel, clear the hardware tables and release the DBR allocation.
 */
u8 dim_destroy_channel(struct dim_channel *ch)
{
        if (!g.dim_is_initialized || !ch)
                return DIM_ERR_DRIVER_NOT_INITIALIZED;

        if (ch->addr == g.atx_dbr.ch_addr) {
                writel(0, &g.dim2->MIEN);
                g.atx_dbr.ch_addr = 0;
        }

        dim2_clear_channel(ch->addr);
        if (ch->dbr_addr < DBR_SIZE)
                free_dbr(ch->dbr_addr, ch->dbr_size);
        ch->dbr_addr = DBR_SIZE;    /* mark as not allocated */

        return DIM_NO_ERROR;
}
 903
/*
 * AHB interrupt service routine.  @channels is a NULL-terminated array of
 * active channels; each is serviced repeatedly until no channel reports a
 * newly completed buffer.
 */
void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
        bool state_changed;

        if (!g.dim_is_initialized) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                             "DIM is not initialized");
                return;
        }

        if (!channels) {
                dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
                return;
        }

        /*
         * Use while-loop and a flag to make sure the age is changed back at
         * least once, otherwise the interrupt may never come if CPU generates
         * interrupt on changing age.
         * This cycle runs not more than number of channels, because
         * channel_service_interrupt() routine doesn't start the channel again.
         */
        do {
                struct dim_channel *const *ch = channels;

                state_changed = false;

                while (*ch) {
                        state_changed |= channel_service_interrupt(*ch);
                        ++ch;
                }
        } while (state_changed);
}
 937
 938u8 dim_service_channel(struct dim_channel *ch)
 939{
 940        if (!g.dim_is_initialized || !ch)
 941                return DIM_ERR_DRIVER_NOT_INITIALIZED;
 942
 943        return channel_service(ch);
 944}
 945
 946struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
 947                                             struct dim_ch_state_t *state_ptr)
 948{
 949        if (!ch || !state_ptr)
 950                return NULL;
 951
 952        state_ptr->ready = ch->state.level < 2;
 953        state_ptr->done_buffers = ch->done_sw_buffers_number;
 954
 955        return state_ptr;
 956}
 957
/* Queue a DMA buffer on @ch; see channel_start() for validation rules. */
bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
                        u16 buffer_size)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_start(ch, buffer_addr, buffer_size);
}
 967
/* Release @buffers_number completed buffers of @ch back to the driver. */
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
        if (!ch)
                return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
                                    "Bad channel");

        return channel_detach_buffers(ch, buffers_number);
}
 976