linux/drivers/mailbox/mtk-cmdq-mailbox.c
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)                 (t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_SYNC_TOKEN_UPDATE          0x68
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     thread_nr;
        u32                     irq_mask;
        struct cmdq_thread      *thread;
        struct clk              *clock;
        bool                    suspended;
        u8                      shift_pa;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
};

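/*
 * Expose the per-SoC PA shift to mailbox clients: on GCE variants that
 * address memory above 32 bits (gce_plat_v4 below), the thread pc/end
 * registers hold DMA addresses right-shifted by this many bits, and
 * clients need the same shift when composing addresses for the hardware.
 */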
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->shift_pa;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

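/*
 * Request the GCE thread to suspend, then poll (atomically; callers may
 * hold the channel spinlock) until the hardware reports the suspended
 * state. A thread that is no longer enabled has nothing left to suspend,
 * so that case is treated as success.
 */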
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If the thread is already disabled, treat the suspend as successful. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

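/*
 * One-time controller setup: program the active slot cycles and walk all
 * event ids through CMDQ_SYNC_TOKEN_UPDATE, presumably to clear any sync
 * token state left over from the boot stage. The clock is only enabled
 * for the duration of these writes.
 */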
static void cmdq_init(struct cmdq *cmdq)
{
        int i;

        WARN_ON(clk_enable(cmdq->clock) < 0);
        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_disable(cmdq->clock);
}

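/*
 * Warm-reset one GCE thread. The hardware clears CMDQ_THR_DO_WARM_RESET
 * when the reset has finished, so poll for that; afterwards the thread
 * registers (pc, end, priority, irq, suspend, enable) read back as 0
 * (see the comment in cmdq_mbox_send_data()).
 */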
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

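/*
 * Chain a new task behind a running thread: rewrite the last instruction
 * of the previous task's buffer into a CMDQ_JUMP_BY_PA to the new buffer,
 * sync the buffer back to the device, and rewrite the thread PC so the
 * GCE re-fetches in case it had already read past the old jump.
 */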
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

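/* true while the thread is parked on a wait-for-event (WFE) instruction */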
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

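/*
 * Complete one task: invoke the client's asynchronous callback with the
 * final status and unlink the task from the thread's busy list. Every
 * packet is expected to carry a callback, hence the WARN_ON.
 */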
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
        struct cmdq_task_cb *cb = &task->pkt->async_cb;
        struct cmdq_cb_data data;

        WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
        data.sta = sta;
        data.data = cb->data;
        cb->cb(data);

        list_del(&task->list_entry);
}

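/*
 * Error recovery: suspend the thread, point its PC at the next pending
 * task (if any) so execution skips the faulting buffer, then resume.
 * Called after the faulting task has already been completed with
 * CMDQ_CB_ERROR and removed from the busy list.
 */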
static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

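/*
 * Per-thread interrupt work, called with the channel lock held: ack the
 * thread's irq flags, then walk the busy list. Every buffer below the one
 * holding the current PC has finished and completes as CMDQ_CB_NORMAL; on
 * an error irq, the task the PC points into completes as CMDQ_CB_ERROR
 * and execution restarts at the next task. Once the list drains, the
 * thread is disabled and the clock reference taken at submit time is
 * dropped.
 */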
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * By the time the ISR calls this function, another CPU core could
         * have run "release task" right before we acquired the spin lock,
         * and thus reset / disabled this GCE thread, so we need to check
         * the enable bit of this GCE thread first.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, CMDQ_CB_ERROR);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list)) {
                cmdq_thread_disable(cmdq, thread);
                clk_disable(cmdq->clock);
        }
}

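/*
 * Top-level ISR. CMDQ_CURR_IRQ_STATUS carries one active-low bit per
 * thread (hence for_each_clear_bit); if every masked bit is still set,
 * none of our threads fired and this shared interrupt belongs to someone
 * else.
 */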
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        return IRQ_HANDLED;
}

static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "task(s) still running on suspend\n");

        clk_unprepare(cmdq->clock);

        return 0;
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(clk_prepare(cmdq->clock) < 0);
        cmdq->suspended = false;
        return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        clk_unprepare(cmdq->clock);

        return 0;
}

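/*
 * Mailbox .send_data callback: queue one cmdq_pkt on a GCE thread. An
 * idle thread is reset, pointed at the packet and enabled. A busy thread
 * is briefly suspended so the new packet can either become the current
 * buffer directly (when the PC already sits at the end boundary) or be
 * chained behind the last queued task; the end address is then extended
 * and the thread resumed.
 */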
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;

        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task)
                return -ENOMEM;

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                WARN_ON(clk_enable(cmdq->clock) < 0);
                /*
                 * The thread reset clears all thread related registers to 0,
                 * including pc, end, priority, irq, suspend and enable. Thus
                 * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables
                 * the thread and starts it running.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->shift_pa;
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->shift_pa;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->shift_pa,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

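/*
 * Mailbox .shutdown callback: suspend the thread, let the irq handler
 * complete anything that already finished, fail whatever is still queued
 * with CMDQ_CB_ERROR, then disable the thread and drop its clock
 * reference.
 */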
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks have success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, CMDQ_CB_ERROR);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);
        clk_disable(cmdq->clock);
done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled. cmdq_mbox_send_data() always resets the thread, clearing
         * the disable and suspend status, when the first packet is sent to
         * the channel, so nothing needs to be done here; just unlock and
         * leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);
}

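/*
 * Mailbox .flush callback: abort or drain the thread. A thread parked in
 * WFE can never make progress, so its queued tasks are failed and the
 * thread shut down; otherwise the thread is resumed and we poll, within
 * the given timeout, for the irq handler to disable it once the busy list
 * drains.
 */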
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_task_cb *cb;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cb = &task->pkt->async_cb;
                if (cb->cb) {
                        data.sta = CMDQ_CB_ERROR;
                        data.data = cb->data;
                        cb->cb(data);
                }
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);
        clk_disable(cmdq->clock);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x done\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

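/*
 * Translate a client's mbox phandle args into a channel: args[0] selects
 * the GCE thread and args[1] carries the hardware priority programmed
 * into CMDQ_THR_PRIORITY whenever an idle thread is (re)started.
 */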
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

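/*
 * Probe: map the GCE register space, pull the per-SoC thread count and
 * PA shift from the match data, register one mailbox channel per hardware
 * thread, and leave the "gce" clock prepared so the atomic submit/irq
 * paths above only need clk_enable()/clk_disable().
 */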
static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct cmdq *cmdq;
        int err, i;
        struct gce_plat *plat_data;

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cmdq->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(cmdq->base)) {
                dev_err(dev, "failed to ioremap gce\n");
                return PTR_ERR(cmdq->base);
        }

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        plat_data = (struct gce_plat *)of_device_get_match_data(dev);
        if (!plat_data) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->thread_nr = plat_data->thread_nr;
        cmdq->shift_pa = plat_data->shift;
        cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        cmdq->clock = devm_clk_get(dev, "gce");
        if (IS_ERR(cmdq->clock)) {
                dev_err(dev, "failed to get gce clk\n");
                return PTR_ERR(cmdq->clock);
        }

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        platform_set_drvdata(pdev, cmdq);
        WARN_ON(clk_prepare(cmdq->clock) < 0);

        cmdq_init(cmdq);

        return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
};

static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
static const struct gce_plat gce_plat_v3 = {.thread_nr = 24};
static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
        {}
};

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        }
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");