linux/drivers/gpu/drm/v3d/v3d_irq.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include <linux/platform_device.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

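/* Interrupt state lives in two register blocks: the per-core
 * V3D_CTL_INT_* registers (binner, renderer, and CSD status for each
 * core) and the hub's V3D_HUB_INT_* registers (the TFU and the MMU
 * shared by all cores).
 */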
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |  \
                             V3D_INT_FLDONE |   \
                             V3D_INT_FRDONE |   \
                             V3D_INT_CSDDONE |  \
                             V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |       \
                            V3D_HUB_INT_MMU_PTI |       \
                            V3D_HUB_INT_MMU_CAP |       \
                            V3D_HUB_INT_TFUC))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);

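/* Worker for the binner out-of-memory interrupt: allocates a fresh
 * 256KB BO, points the binner's overflow registers at it, and parks
 * it on the render job's unref list so it stays alive until the
 * frame is done with it.
 */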
static void
v3d_overflow_mem_work(struct work_struct *work)
{
        struct v3d_dev *v3d =
                container_of(work, struct v3d_dev, overflow_mem_work);
        struct drm_device *dev = &v3d->drm;
        struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
        struct drm_gem_object *obj;
        unsigned long irqflags;

        if (IS_ERR(bo)) {
                DRM_ERROR("Couldn't allocate binner overflow mem\n");
                return;
        }
        obj = &bo->base.base;

        /* We lost a race, and our work task came in after the bin job
         * completed and exited.  This can happen because the HW
         * signals OOM before it's fully OOM, so the binner might just
         * barely complete.
         *
         * If we lose the race and our work task comes in after a new
         * bin job got scheduled, that's fine.  We'll just give them
         * some binner pool anyway.
         */
        spin_lock_irqsave(&v3d->job_lock, irqflags);
        if (!v3d->bin_job) {
                spin_unlock_irqrestore(&v3d->job_lock, irqflags);
                goto out;
        }

        drm_gem_object_get(obj);
        list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
        spin_unlock_irqrestore(&v3d->job_lock, irqflags);

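        /* Hand the new pool to the binner: BPOA takes the base
         * address (node.start is in pages, hence the PAGE_SHIFT) and
         * BPOS the size in bytes.
         */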
        V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
        V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
        drm_gem_object_put(obj);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

        if (intsts & V3D_INT_OUTOMEM) {
                /* Note that the OOM status is edge signaled, so the
                 * interrupt won't happen again until we actually
                 * add more memory.  Also, as of V3D 4.1, FLDONE won't
                 * be reported until any OOM state has been cleared.
                 */
                schedule_work(&v3d->overflow_mem_work);
                status = IRQ_HANDLED;
        }

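        /* Each *DONE bit below means the corresponding queue's
         * current job has finished; signal its irq_fence so the
         * scheduler can queue up the next job and unblock waiters.
         */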
        if (intsts & V3D_INT_FLDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->bin_job->base.irq_fence);

                trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        if (intsts & V3D_INT_FRDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->render_job->base.irq_fence);

                trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        if (intsts & V3D_INT_CSDDONE) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->csd_job->base.irq_fence);

                trace_v3d_csd_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        /* We shouldn't be triggering these if we have GMP in
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
                dev_err(v3d->drm.dev, "GMP violation\n");

        /* V3D 4.2 wires the hub and core IRQs together, so if we
         * didn't see the common one then check hub for MMU IRQs.
         */
        if (v3d->single_irq_line && status == IRQ_NONE)
                return v3d_hub_irq(irq, arg);

        return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_READ(V3D_HUB_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_WRITE(V3D_HUB_INT_CLR, intsts);

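        /* TFUC: the TFU finished its current conversion job; signal
         * its fence just like the core queues above.
         */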
        if (intsts & V3D_HUB_INT_TFUC) {
                struct v3d_fence *fence =
                        to_v3d_fence(v3d->tfu_job->base.irq_fence);

                trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
                dma_fence_signal(&fence->base);
                status = IRQ_HANDLED;
        }

        if (intsts & (V3D_HUB_INT_MMU_WRV |
                      V3D_HUB_INT_MMU_PTI |
                      V3D_HUB_INT_MMU_CAP)) {
                u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
                u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
                                (v3d->va_width - 32));
                static const char *const v3d41_axi_ids[] = {
                        "L2T",
                        "PTB",
                        "PSE",
                        "TLB",
                        "CLE",
                        "TFU",
                        "MMU",
                        "GMP",
                };
                const char *client = "?";

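                /* Reading V3D_MMU_CTL and writing the value straight
                 * back acks the latched violation bits (they look to
                 * be write-1-to-clear).
                 */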
                V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));

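                /* On V3D 4.1+ the upper bits of the AXI ID identify
                 * the client unit, so shift off the low per-unit bits
                 * before indexing the table above.
                 */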
                if (v3d->ver >= 41) {
                        axi_id = axi_id >> 5;
                        if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
                                client = v3d41_axi_ids[axi_id];
                }

                dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
                        client, axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
                        ((intsts & V3D_HUB_INT_MMU_PTI) ?
                         ", pte invalid" : ""),
                        ((intsts & V3D_HUB_INT_MMU_CAP) ?
                         ", cap exceeded" : ""));
                status = IRQ_HANDLED;
        }

        return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
        int irq1, ret, core;

        INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

        /* Clear any pending interrupts someone might have left around
         * for us.
         */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

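        /* Platforms may provide separate core and hub interrupt
         * lines, or a single shared one.  Probe for the optional
         * second line first; if it's absent, route everything through
         * v3d_irq() and let it chain into v3d_hub_irq().
         */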
        irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
        if (irq1 == -EPROBE_DEFER)
                return irq1;
        if (irq1 > 0) {
                ret = devm_request_irq(v3d->drm.dev, irq1,
                                       v3d_irq, IRQF_SHARED,
                                       "v3d_core0", v3d);
                if (ret)
                        goto fail;
                ret = devm_request_irq(v3d->drm.dev,
                                       platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_hub_irq, IRQF_SHARED,
                                       "v3d_hub", v3d);
                if (ret)
                        goto fail;
        } else {
                v3d->single_irq_line = true;

                ret = devm_request_irq(v3d->drm.dev,
                                       platform_get_irq(v3d_to_pdev(v3d), 0),
                                       v3d_irq, IRQF_SHARED,
                                       "v3d", v3d);
                if (ret)
                        goto fail;
        }

        v3d_irq_enable(v3d);
        return 0;

fail:
        if (ret != -EPROBE_DEFER)
                dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
        return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
        int core;

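        /* The mask registers have set/clear semantics: going by how
         * they're used here, bits written to MSK_SET mask (disable)
         * those interrupts and bits written to MSK_CLR unmask them.
         */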
        /* Enable our set of interrupts, masking out any others. */
        for (core = 0; core < v3d->cores; core++) {
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
        }

        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
        V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
        int core;

        /* Disable all interrupts. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

        /* Clear any pending interrupts we might have left. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

        cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
        v3d_irq_enable(v3d);
}