linux/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>

#include "dpu_core_irq.h"
#include "dpu_trace.h"

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @arg:                private data of callback handler
 * @irq_idx:            interrupt index
 */
static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
{
        struct dpu_kms *dpu_kms = arg;
        struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
        struct dpu_irq_callback *cb;

        VERB("irq_idx=%d\n", irq_idx);

        if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
                DRM_ERROR("no registered cb, idx:%d\n", irq_idx);

        atomic_inc(&irq_obj->irq_counts[irq_idx]);

        /*
         * Perform registered function callback
         */
        list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
                if (cb->func)
                        cb->func(cb->arg, irq_idx);
}
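
/*
 * For reference, a minimal sketch of the callback node walked above. The
 * real definition lives elsewhere in this driver's headers; the layout below
 * only illustrates the fields used here (list linkage, handler, private
 * argument) and is not an authoritative copy:
 *
 *      struct dpu_irq_callback {
 *              struct list_head list;
 *              void (*func)(void *arg, int irq_idx);
 *              void *arg;
 *      };
 *
 * Each irq_idx has its own list head in irq_obj->irq_cb_tbl[], so the loop
 * above only visits callbacks registered for the interrupt that fired.
 */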

u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
        if (!dpu_kms->hw_intr ||
                        !dpu_kms->hw_intr->ops.get_interrupt_status)
                return 0;

        if (irq_idx < 0) {
                DPU_ERROR("[%pS] invalid irq_idx=%d\n",
                                __builtin_return_address(0), irq_idx);
                return 0;
        }

        return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
                        irq_idx, clear);
}
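
/*
 * Usage sketch (illustrative, not taken from a real caller): sample the raw
 * status for a known irq_idx and clear it in the same read. The irq_idx is
 * assumed to have been resolved elsewhere:
 *
 *      u32 status = dpu_core_irq_read(dpu_kms, irq_idx, true);
 *
 *      if (status)
 *              DRM_DEBUG("irq_idx=%d was pending\n", irq_idx);
 */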

int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
                struct dpu_irq_callback *register_irq_cb)
{
        unsigned long irq_flags;

        if (!dpu_kms->irq_obj.irq_cb_tbl) {
                DPU_ERROR("invalid params\n");
                return -EINVAL;
        }

        if (!register_irq_cb || !register_irq_cb->func) {
                DPU_ERROR("invalid irq_cb:%d func:%d\n",
                                register_irq_cb != NULL,
                                register_irq_cb ?
                                        register_irq_cb->func != NULL : -1);
                return -EINVAL;
        }

        if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
                DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

        irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
        trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
        list_del_init(&register_irq_cb->list);
        list_add_tail(&register_irq_cb->list,
                        &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
        if (list_is_first(&register_irq_cb->list,
                        &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
                int ret = dpu_kms->hw_intr->ops.enable_irq_locked(
                                dpu_kms->hw_intr,
                                irq_idx);
                if (ret)
                        DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
                                        irq_idx);
        }
        dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);

        return 0;
}
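
/*
 * Usage sketch, with hypothetical names that are not part of this driver: a
 * consumer embeds a struct dpu_irq_callback, points func/arg at its own
 * handler and context, and registers it for the irq_idx it resolved from the
 * interrupt table:
 *
 *      static void example_vblank_cb(void *arg, int irq_idx)
 *      {
 *              struct example_ctx *ctx = arg;
 *
 *              complete(&ctx->vblank_done);
 *      }
 *
 *      INIT_LIST_HEAD(&ctx->irq_cb.list);
 *      ctx->irq_cb.func = example_vblank_cb;
 *      ctx->irq_cb.arg = ctx;
 *      dpu_core_irq_register_callback(dpu_kms, irq_idx, &ctx->irq_cb);
 *
 * Note that registering the first callback for an irq_idx is what enables
 * the interrupt in hardware (the list_is_first() check above).
 */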

int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
                struct dpu_irq_callback *register_irq_cb)
{
        unsigned long irq_flags;

        if (!dpu_kms->irq_obj.irq_cb_tbl) {
                DPU_ERROR("invalid params\n");
                return -EINVAL;
        }

        if (!register_irq_cb || !register_irq_cb->func) {
                DPU_ERROR("invalid irq_cb:%d func:%d\n",
                                register_irq_cb != NULL,
                                register_irq_cb ?
                                        register_irq_cb->func != NULL : -1);
                return -EINVAL;
        }

        if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
                DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

        irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
        trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
        list_del_init(&register_irq_cb->list);
        /* if no callbacks remain for this irq_idx, disable the interrupt */
        if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
                int ret = dpu_kms->hw_intr->ops.disable_irq_locked(
                                dpu_kms->hw_intr,
                                irq_idx);
                if (ret)
                        DPU_ERROR("Failed to disable IRQ for irq_idx:%d\n",
                                        irq_idx);
                VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
        }
        dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);

        return 0;
}
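
/*
 * The matching teardown for the sketch above: the same dpu_irq_callback that
 * was registered is passed back in, and removing the last callback left on
 * an irq_idx disables that interrupt again:
 *
 *      dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &ctx->irq_cb);
 */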

static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
        if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
                return;

        dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
        if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
                return;

        dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
        struct dpu_kms *dpu_kms = s->private;
        struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
        struct dpu_irq_callback *cb;
        unsigned long irq_flags;
        int i, irq_count, cb_count;

        if (WARN_ON(!irq_obj->irq_cb_tbl))
                return 0;

        for (i = 0; i < irq_obj->total_irqs; i++) {
                irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
                cb_count = 0;
                irq_count = atomic_read(&irq_obj->irq_counts[i]);
                list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
                        cb_count++;
                dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);

                if (irq_count || cb_count)
                        seq_printf(s, "idx:%d irq:%d cb:%d\n",
                                        i, irq_count, cb_count);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
{
        debugfs_create_file("core_irq", 0600, parent, dpu_kms,
                &dpu_debugfs_core_irq_fops);
}
#endif
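
/*
 * Reading the "core_irq" debugfs file prints one line per interrupt index
 * that has fired or has callbacks registered, in the format emitted by
 * dpu_debugfs_core_irq_show(). Example output (values made up):
 *
 *      idx:31 irq:120 cb:1
 *      idx:33 irq:119 cb:1
 */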

void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
        int i;

        pm_runtime_get_sync(&dpu_kms->pdev->dev);
        dpu_clear_all_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        /* Create a callback list and counter for every possible irq_idx */
        dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
        dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
                        sizeof(struct list_head), GFP_KERNEL);
        dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
                        sizeof(atomic_t), GFP_KERNEL);
        for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
                INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
                atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
        }
}

void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
        int i;

        pm_runtime_get_sync(&dpu_kms->pdev->dev);
        for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
                if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
                        DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

        dpu_clear_all_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);

        kfree(dpu_kms->irq_obj.irq_cb_tbl);
        kfree(dpu_kms->irq_obj.irq_counts);
        dpu_kms->irq_obj.irq_cb_tbl = NULL;
        dpu_kms->irq_obj.irq_counts = NULL;
        dpu_kms->irq_obj.total_irqs = 0;
}
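
/*
 * Expected call ordering, as implied by the checks above (the sequence below
 * is illustrative, not a prescription): preinstall must run before any
 * callback is registered, since dpu_core_irq_register_callback() returns
 * -EINVAL while irq_cb_tbl is still NULL, and every callback should be
 * unregistered before uninstall, which only warns about leftovers before
 * freeing the tables:
 *
 *      dpu_core_irq_preinstall(dpu_kms);
 *      ...
 *      dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
 *      ...
 *      dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &cb);
 *      ...
 *      dpu_core_irq_uninstall(dpu_kms);
 */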

irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
        /*
         * Dispatch to the HW driver, which looks up which interrupts have
         * fired. For each match it calls back into
         * dpu_core_irq_callback_handler() with the irq_idx from its lookup
         * table, and that handler runs the callbacks registered for the
         * index. The interrupt status is read and cleared as part of this
         * dispatch, once the registered callbacks have finished.
         */
        dpu_kms->hw_intr->ops.dispatch_irqs(
                        dpu_kms->hw_intr,
                        dpu_core_irq_callback_handler,
                        dpu_kms);

        return IRQ_HANDLED;
}
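
/*
 * Conceptual sketch of the dispatch step above. The real loop lives in the
 * hw_intr layer; the iterator and helper names below are hypothetical and
 * only illustrate the contract dpu_core_irq() relies on:
 *
 *      for_each_pending_irq(hw_intr, irq_idx) {
 *              cbfunc(arg, irq_idx);   <- dpu_core_irq_callback_handler(dpu_kms, irq_idx)
 *              clear_irq_status(hw_intr, irq_idx);
 *      }
 */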