linux/drivers/mailbox/mtk-cmdq-mailbox.c
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)                 ((t)->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX                (2)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_SYNC_TOKEN_UPDATE          0x68
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define GCE_GCTL_VALUE                  0x48

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

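/*
 * Per-thread state for one GCE hardware thread: its register window inside
 * the GCE block, the tasks currently queued on it, and the priority written
 * to CMDQ_THR_PRIORITY when the thread is started.
 */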
struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

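/*
 * One packet in flight on a thread: the mailbox packet itself, the DMA
 * address of its command buffer, and the link into the owning thread's
 * task_busy_list.
 */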
struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     thread_nr;
        u32                     irq_mask;
        struct cmdq_thread      *thread;
        struct clk_bulk_data    clocks[CMDQ_GCE_NUM_MAX];
        bool                    suspended;
        u8                      shift_pa;
        bool                    control_by_sw;
        u32                     gce_num;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
        bool control_by_sw;
        u32 gce_num;
};

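/*
 * GCE variants with a non-zero shift keep (address >> shift_pa) in their
 * PC/END registers, letting physical addresses wider than 32 bits fit in a
 * 32-bit register. Mailbox clients query the shift through this helper so
 * they can encode addresses into commands the same way.
 */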
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->shift_pa;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

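/*
 * Ask the hardware to suspend a thread and poll until it reports the
 * suspended state. A thread that is no longer enabled is already quiescent,
 * so that case succeeds immediately.
 */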
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If the thread is already disabled, treat the suspend as successful. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

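/*
 * One-time controller setup: program the per-thread slot cycle budget and
 * walk every event ID through CMDQ_SYNC_TOKEN_UPDATE, which presumably
 * clears the corresponding sync token. The GCE clocks must be running while
 * these registers are touched.
 */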
static void cmdq_init(struct cmdq *cmdq)
{
        int i;

        WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
        if (cmdq->control_by_sw)
                writel(0x7, cmdq->base + GCE_GCTL_VALUE);
        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

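/*
 * Append a task to a busy thread by patching the previous task's last
 * 64-bit instruction into a jump (by absolute PA) to the new command
 * buffer, then rewriting the PC register so the GCE drops any commands it
 * has already prefetched.
 */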
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 |
                (task->pa_base >> task->cmdq->shift_pa);
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
        struct cmdq_task_cb *cb = &task->pkt->async_cb;
        struct cmdq_cb_data data;

        WARN_ON(!cb->cb);
        data.sta = sta;
        data.data = cb->data;
        data.pkt = task->pkt;
        if (cb->cb)
                cb->cb(data);

        mbox_chan_received_data(task->thread->chan, &data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

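/*
 * Retire every task the thread's PC has moved past. A task is complete once
 * the PC has left its buffer or rests on its final instruction; on an error
 * IRQ the task the PC points into is failed and the thread restarts at the
 * next queued task.
 */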
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        dma_addr_t curr_pa, task_end_pa;
        u32 irq_flag;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * When the ISR calls this function, another CPU core could run
         * "release task" right before we acquire the spin lock, and thus
         * reset / disable this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        /* Cast before shifting so bits above 31 survive when shift_pa > 0. */
        curr_pa = (dma_addr_t)readl(thread->base + CMDQ_THR_CURR_ADDR)
                  << cmdq->shift_pa;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, 0);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, -ENOEXEC);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list)) {
                cmdq_thread_disable(cmdq, thread);
                clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
        }
}

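/*
 * The bits of CMDQ_CURR_IRQ_STATUS are active low: a cleared bit marks a
 * thread with a pending interrupt, hence the XOR against irq_mask to detect
 * "nothing pending" and the for_each_clear_bit() walk below.
 */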
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        return IRQ_HANDLED;
}

static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "task(s) still running on suspend\n");

        clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);

        return 0;
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
        cmdq->suspended = false;
        return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
        return 0;
}

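/*
 * Queue a packet on a GCE thread. An idle thread is reset, programmed with
 * the packet's start/end addresses and enabled. A busy thread is suspended
 * first, then either retargeted directly at the new buffer (when the old
 * commands already ran to the end) or jump-chained to it, gets its end
 * address extended, and is resumed.
 */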
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        dma_addr_t curr_pa, end_pa;

        /* Clients should not flush new tasks while suspended. */
        WARN_ON(cmdq->suspended);

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task)
                return -ENOMEM;

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));

                /*
                 * The thread reset will clear the thread-related registers to
                 * 0, including pc, end, priority, irq, suspend and enable.
                 * Thus writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK
                 * enables the thread and makes it run.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                /* Cast before shifting so bits above 31 survive when shift_pa > 0. */
                curr_pa = (dma_addr_t)readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->shift_pa;
                end_pa = (dma_addr_t)readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->shift_pa;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->shift_pa,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        return 0;
}


static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

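/*
 * Channel teardown: let the IRQ handler retire whatever has already
 * finished, then fail any task still queued with -ECONNABORTED before
 * disabling the thread and dropping the clock enable count.
 */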
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks have success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, -ECONNABORTED);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled. cmdq_mbox_send_data() always resets the thread, clearing
         * its disable and suspend status, when the first packet is sent to a
         * channel, so there is nothing left to do here except unlock and
         * leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);
}

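/*
 * mbox flush callback: if the thread sits in a wait-for-event, the queued
 * tasks are presumed unable to finish in time and are aborted with
 * -ECONNABORTED; otherwise the thread keeps running and is polled until it
 * disables itself or the timeout expires.
 */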
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_task_cb *cb;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cb = &task->pkt->async_cb;
                data.sta = -ECONNABORTED;
                data.data = cb->data;
                data.pkt = task->pkt;
                if (cb->cb)
                        cb->cb(data);

                mbox_chan_received_data(task->thread->chan, &data);
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);
        clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x to finish\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

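/*
 * Translate a client's mbox specifier: the first cell selects the GCE
 * thread, the second carries the priority that cmdq_mbox_send_data()
 * programs into CMDQ_THR_PRIORITY when the thread starts.
 */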
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

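/*
 * On multi-GCE SoCs each instance's clock is resolved through a "gce<N>"
 * alias on the sibling GCE device nodes; single-GCE SoCs simply take the
 * "gce" clock of the device itself.
 */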
static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct cmdq *cmdq;
        int err, i;
        const struct gce_plat *plat_data;
        struct device_node *phandle = dev->of_node;
        struct device_node *node;
        int alias_id = 0;
        const char *clk_name = "gce";

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cmdq->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(cmdq->base))
                return PTR_ERR(cmdq->base);

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        plat_data = of_device_get_match_data(dev);
        if (!plat_data) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->thread_nr = plat_data->thread_nr;
        cmdq->shift_pa = plat_data->shift;
        cmdq->control_by_sw = plat_data->control_by_sw;
        cmdq->gce_num = plat_data->gce_num;
        cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        if (cmdq->gce_num > 1) {
                for_each_child_of_node(phandle->parent, node) {
                        alias_id = of_alias_get_id(node, clk_name);
                        if (alias_id >= 0 && alias_id < cmdq->gce_num) {
                                /*
                                 * The clock id string is referenced after
                                 * probe returns, so it must not live on the
                                 * stack; allocate it with device-managed
                                 * lifetime instead.
                                 */
                                cmdq->clocks[alias_id].id = devm_kasprintf(dev,
                                        GFP_KERNEL, "%s%d", clk_name, alias_id);
                                if (!cmdq->clocks[alias_id].id) {
                                        of_node_put(node);
                                        return -ENOMEM;
                                }
                                cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
                                if (IS_ERR(cmdq->clocks[alias_id].clk)) {
                                        /* Drop the iterator's node reference on early exit. */
                                        of_node_put(node);
                                        dev_err(dev, "failed to get gce clk: %d\n", alias_id);
                                        return PTR_ERR(cmdq->clocks[alias_id].clk);
                                }
                        }
                }
        } else {
                cmdq->clocks[alias_id].id = clk_name;
                cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
                if (IS_ERR(cmdq->clocks[alias_id].clk)) {
                        dev_err(dev, "failed to get gce clk\n");
                        return PTR_ERR(cmdq->clocks[alias_id].clk);
                }
        }

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        platform_set_drvdata(pdev, cmdq);

        WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));

        cmdq_init(cmdq);

        return 0;
}


static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
};

static const struct gce_plat gce_plat_v2 = {
        .thread_nr = 16,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v3 = {
        .thread_nr = 24,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v4 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_v5 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 2
};

static const struct gce_plat gce_plat_v6 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = false,
        .gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
        {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
        {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
        {}
};

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        }
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");