linux/drivers/mailbox/mtk-cmdq-mailbox.c
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_IRQ_MASK                   0xffff
#define CMDQ_NUM_CMD(t)                 ((t)->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

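/*
 * Each GCE instruction is CMDQ_INST_SIZE (8) bytes wide: the op code sits
 * in the high 32 bits and the argument in the low 32 bits.  The two jump
 * encodings below take either a relative offset or a physical address.
 */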
#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
        bool                    atomic_exec;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     thread_nr;
        struct cmdq_thread      *thread;
        struct clk              *clock;
        bool                    suspended;
};

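/*
 * Ask the hardware to suspend one GCE thread and poll its status register
 * until the suspended bit shows up.  A thread that is already disabled
 * counts as suspended.  Runs in atomic context, hence the _atomic poll.
 */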
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If the thread is already disabled, treat the suspend as successful. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

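/*
 * One-time controller setup: program CMDQ_THR_SLOT_CYCLES which, going by
 * the register name, bounds how long a thread may hold its execution slot.
 * The clock only needs to be ungated for this single register write.
 */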
static void cmdq_init(struct cmdq *cmdq)
{
        WARN_ON(clk_enable(cmdq->clock) < 0);
        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        clk_disable(cmdq->clock);
}

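/*
 * Trigger a warm reset of one GCE thread and poll until the hardware has
 * cleared the reset bit again.
 */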
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

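/*
 * Chain a new task onto a busy thread: rewrite the trailing instruction of
 * the previous task (a jump, in a finalized packet) into a jump-by-physical-
 * address to the new buffer, sync the change out to the device, and make
 * the thread re-fetch so it does not run a stale prefetched copy.
 */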
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

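/*
 * A wait-for-event instruction carries the WFE op code in its high word
 * and the update/wait/wait-value option bits in its low word; compare both
 * halves of the 64-bit instruction at once.
 */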
static bool cmdq_command_is_wfe(u64 cmd)
{
        u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
        u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
        u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;

        return ((cmd & wfe_mask) == (wfe_op | wfe_option));
}

/* we assume tasks in the same display GCE thread are waiting for the same event */
static void cmdq_task_remove_wfe(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        u64 *base = task->pkt->va_base;
        int i;

        dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
                                DMA_TO_DEVICE);
        for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
                if (cmdq_command_is_wfe(base[i]))
                        base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
                                  CMDQ_JUMP_PASS;
        dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
                                   DMA_TO_DEVICE);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_thread_wait_end(struct cmdq_thread *thread,
                                 unsigned long end_pa)
{
        struct device *dev = thread->chan->mbox->dev;
        unsigned long curr_pa;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
                        curr_pa, curr_pa == end_pa, 1, 20))
                dev_err(dev, "GCE thread cannot run to end.\n");
}

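/*
 * Complete one task: invoke the client's async flush callback with the
 * given status and unlink the task from the thread's busy list.  The
 * caller still owns (and frees) the task structure.
 */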
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
        struct cmdq_task_cb *cb = &task->pkt->async_cb;
        struct cmdq_cb_data data;

        WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
        data.sta = sta;
        data.data = cb->data;
        cb->cb(data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;

        dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

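/*
 * Per-thread interrupt work, called with the channel spinlock held.  Walk
 * the busy list in submission order: every task the thread's PC has run
 * past completes normally, as does the current task once the PC sits on
 * its final instruction.  On an error IRQ the current task completes with
 * CMDQ_CB_ERROR and the thread is restarted at the next queued task.  When
 * the list drains, the thread is disabled and the clock reference taken at
 * first submission is dropped.
 */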
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * When the ISR calls this function, another CPU core could have run
         * "release task" right before we acquired the spin lock, and thus
         * reset / disabled this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, CMDQ_CB_ERROR);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list)) {
                cmdq_thread_disable(cmdq, thread);
                clk_disable(cmdq->clock);
        }
}

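/*
 * Top-level ISR.  The controller-wide status register is active-low per
 * thread (a cleared bit means that thread has a pending interrupt), so an
 * all-ones value means the IRQ is not ours, and the per-thread handler
 * runs for each cleared bit under the channel lock.
 */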
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK;
        if (!(irq_status ^ CMDQ_IRQ_MASK))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        return IRQ_HANDLED;
}

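/*
 * System sleep hooks only gate the clock.  A thread that is still busy is
 * warned about but not forcibly stopped; clients are expected to stop
 * submitting before suspend (see the WARN_ON in cmdq_mbox_send_data()).
 */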
static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "task(s) still running at suspend\n");

        clk_unprepare(cmdq->clock);

        return 0;
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(clk_prepare(cmdq->clock) < 0);
        cmdq->suspended = false;
        return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        clk_unprepare(cmdq->clock);

        return 0;
}

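/*
 * Mailbox .send_data hook: queue one finalized cmdq_pkt on a GCE thread.
 * An idle thread is reset and pointed straight at the new buffer.  A busy
 * thread is briefly suspended so the new task can either be chained after
 * the previous one or, if the thread has already run off the end of the
 * old buffer, installed as the new PC; the end address is then extended
 * and the thread resumed.
 */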
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;

        /* Clients should not flush new tasks while suspended. */
        WARN_ON(cmdq->suspended);

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task)
                return -ENOMEM;

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                WARN_ON(clk_enable(cmdq->clock) < 0);
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
                writel(task->pa_base + pkt->cmd_buf_size,
                       thread->base + CMDQ_THR_END_ADDR);
                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR);

                /*
                 * Atomic execution strips the WFE from appended tasks, i.e.
                 * only the first task waits for the event, so the thread is
                 * never paused mid-run.
                 */
                if (thread->atomic_exec) {
                        /* GCE is still executing if the command is not a WFE */
                        if (!cmdq_thread_is_in_wfe(thread)) {
                                cmdq_thread_resume(thread);
                                cmdq_thread_wait_end(thread, end_pa);
                                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                                /* set to this task directly */
                                writel(task->pa_base,
                                       thread->base + CMDQ_THR_CURR_ADDR);
                        } else {
                                cmdq_task_insert_into_thread(task);
                                cmdq_task_remove_wfe(task);
                                smp_mb(); /* modify jump before enable thread */
                        }
                } else {
                        /* check boundary */
                        if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                            curr_pa == end_pa) {
                                /* set to this task directly */
                                writel(task->pa_base,
                                       thread->base + CMDQ_THR_CURR_ADDR);
                        } else {
                                cmdq_task_insert_into_thread(task);
                                smp_mb(); /* modify jump before enable thread */
                        }
                }
                writel(task->pa_base + pkt->cmd_buf_size,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        return 0;
}

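/*
 * No per-channel setup or teardown is needed: a thread is powered up
 * lazily on first send and shut down from the IRQ handler once its busy
 * list drains.
 */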
static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
};

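/*
 * Translate a three-cell mbox specifier (thread index, priority,
 * atomic_exec flag) into a channel, configuring the backing GCE thread
 * along the way.  A client device tree node would reference it roughly as
 * (illustrative placeholder values only):
 *
 *   mboxes = <&gce thread_index priority atomic_exec>;
 */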
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->atomic_exec = (sp->args[2] != 0);
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct cmdq *cmdq;
        int err, i;

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cmdq->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(cmdq->base)) {
                dev_err(dev, "failed to ioremap gce\n");
                return PTR_ERR(cmdq->base);
        }

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0) {
                dev_err(dev, "failed to get irq\n");
                return cmdq->irq;
        }
        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        cmdq->clock = devm_clk_get(dev, "gce");
        if (IS_ERR(cmdq->clock)) {
                dev_err(dev, "failed to get gce clk\n");
                return PTR_ERR(cmdq->clock);
        }

        cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                                        sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        platform_set_drvdata(pdev, cmdq);
        WARN_ON(clk_prepare(cmdq->clock) < 0);

        cmdq_init(cmdq);

        return 0;
}

 532
 533static const struct dev_pm_ops cmdq_pm_ops = {
 534        .suspend = cmdq_suspend,
 535        .resume = cmdq_resume,
 536};
 537
 538static const struct of_device_id cmdq_of_ids[] = {
 539        {.compatible = "mediatek,mt8173-gce", .data = (void *)16},
 540        {}
 541};
 542
 543static struct platform_driver cmdq_drv = {
 544        .probe = cmdq_probe,
 545        .remove = cmdq_remove,
 546        .driver = {
 547                .name = "mtk_cmdq",
 548                .pm = &cmdq_pm_ops,
 549                .of_match_table = cmdq_of_ids,
 550        }
 551};
 552
 553static int __init cmdq_drv_init(void)
 554{
 555        return platform_driver_register(&cmdq_drv);
 556}
 557
 558static void __exit cmdq_drv_exit(void)
 559{
 560        platform_driver_unregister(&cmdq_drv);
 561}
 562
 563subsys_initcall(cmdq_drv_init);
 564module_exit(cmdq_drv_exit);
 565
 566MODULE_LICENSE("GPL v2");
 567