// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"
/* Wait list management */

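/*
 * Lifecycle of a waiter, tracked in waiter->state:
 *
 *   WLS_PENDING:   queued on a sync point's wait list, threshold not
 *                  yet reached
 *   WLS_REMOVED:   taken off the wait list by the interrupt path, its
 *                  action not yet run
 *   WLS_CANCELLED: cancelled via host1x_intr_put_ref() while pending
 *   WLS_HANDLED:   action has run (or the cancellation was observed);
 *                  only the final kref_put() remains
 *
 * The values are ordered so that a single atomic_inc_return() advances
 * PENDING->REMOVED and CANCELLED->HANDLED.
 */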
enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * Add a waiter to a waiter queue, sorted by threshold.
 * Returns true if it was added at the head of the queue.
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
                                struct list_head *queue)
{
        struct host1x_waitlist *pos;
        u32 thresh = waiter->thresh;

        /*
         * The signed difference compares thresholds modulo 2^32, so the
         * ordering stays correct even when the sync point value wraps.
         */
        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

/*
 * Run through a waiter queue for a single sync point ID and gather all
 * completed waiters into lists, grouped by action type.
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct host1x_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
                    !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                          struct host1x_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /*
                 * PENDING->REMOVED or CANCELLED->HANDLED: a waiter that
                 * reaches HANDLED here was cancelled concurrently, and a
                 * NULL dest means it was folded into the previous waiter,
                 * so in both cases drop it instead of queueing its action.
                 */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else
                        list_move_tail(&waiter->list, dest);
        }
}

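/*
 * Re-arm the threshold interrupt for the next pending waiter; the wait
 * list is sorted by threshold, so the first entry holds the value the
 * hardware should fire on next.
 */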
static void reset_threshold_interrupt(struct host1x *host,
                                      struct list_head *head,
                                      unsigned int id)
{
        u32 thresh =
                list_first_entry(head, struct host1x_waitlist, list)->thresh;

        host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
        host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
        struct host1x_channel *channel = waiter->data;

        host1x_cdma_update(&channel->cdma);

        /* add nr_completed to trace */
        trace_host1x_channel_submit_complete(dev_name(channel->dev),
                                             waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

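/* Keep this table in the same order as enum host1x_intr_action. */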
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        unsigned int i;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct host1x_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        /* every waiter handled here must be in REMOVED state */
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/*
 * Remove and handle all waiters that have completed for the given syncpt.
 * Returns nonzero if the wait list is empty afterwards.
 */
static int process_wait_list(struct host1x *host,
                             struct host1x_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[HOST1X_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->intr.lock);

        remove_completed_waiters(&syncpt->intr.wait_head, threshold,
                                 completed);

        empty = list_empty(&syncpt->intr.wait_head);
        if (empty)
                host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
        else
                reset_threshold_interrupt(host, &syncpt->intr.wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->intr.lock);

        /* run the completed actions outside of the syncpt lock */
        run_handlers(completed);

        return empty;
}

/*
 * Sync point threshold interrupt service function.
 * Handles sync point threshold triggers, in workqueue (thread) context.
 */
static void syncpt_thresh_work(struct work_struct *work)
{
        struct host1x_syncpt_intr *syncpt_intr =
                container_of(work, struct host1x_syncpt_intr, work);
        struct host1x_syncpt *syncpt =
                container_of(syncpt_intr, struct host1x_syncpt, intr);
        unsigned int id = syncpt->id;
        struct host1x *host = syncpt->host;

        (void)process_wait_list(host, syncpt,
                                host1x_syncpt_load(host->syncpt + id));
}

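/*
 * Register a waiter whose action fires once the sync point reaches
 * thresh. If ref is non-NULL, an extra reference to the waiter is
 * returned through it; the caller must drop it with
 * host1x_intr_put_ref() when done.
 */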
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
                           u32 thresh, enum host1x_intr_action action,
                           void *data, struct host1x_waitlist *waiter,
                           void **ref)
{
        int queue_was_empty;

        if (waiter == NULL) {
                pr_warn("%s: NULL waiter\n", __func__);
                return -EINVAL;
        }

        /* initialize a new waiter */
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        spin_lock(&syncpt->intr.lock);

        queue_was_empty = list_empty(&syncpt->intr.wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
                /* added at head of list - new threshold value */
                host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
        }

        spin_unlock(&syncpt->intr.lock);

        if (ref)
                *ref = waiter;
        return 0;
}
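
/*
 * A typical wait, sketched after the pattern used by host1x_syncpt_wait()
 * in syncpt.c (the kzalloc'd waiter and the error handling here are a
 * sketch, not code from this file):
 *
 *        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *        struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *        void *ref;
 *
 *        err = host1x_intr_add_action(host, syncpt, thresh,
 *                                     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                                     &wq, waiter, &ref);
 *        if (!err) {
 *                wait_event_interruptible_timeout(wq,
 *                        host1x_syncpt_is_expired(syncpt, thresh), timeout);
 *                host1x_intr_put_ref(host, syncpt->id, ref);
 *        }
 */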

void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;

        /*
         * Mark a pending waiter cancelled; if the interrupt path has
         * already taken it off the wait list (WLS_REMOVED), spin until
         * its action has run and the state settles at WLS_HANDLED.
         */
        while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
               WLS_REMOVED)
                schedule();

        /* flush the cancelled waiter off the wait list */
        syncpt = host->syncpt + id;
        (void)process_wait_list(host, syncpt,
                                host1x_syncpt_load(host->syncpt + id));

        kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
        unsigned int id;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_init(&host->intr_mutex);
        host->intr_syncpt_irq = irq_sync;

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_syncpt *syncpt = host->syncpt + id;

                spin_lock_init(&syncpt->intr.lock);
                INIT_LIST_HEAD(&syncpt->intr.wait_head);
                snprintf(syncpt->intr.thresh_irq_name,
                         sizeof(syncpt->intr.thresh_irq_name),
                         "host1x_sp_%02u", id);
        }

        host1x_intr_start(host);

        return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
        host1x_intr_stop(host);
}

void host1x_intr_start(struct host1x *host)
{
        u32 hz = clk_get_rate(host->clk);
        int err;

        mutex_lock(&host->intr_mutex);
        err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
                                            syncpt_thresh_work);
        if (err) {
                mutex_unlock(&host->intr_mutex);
                return;
        }
        mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
        unsigned int id;
        struct host1x_syncpt *syncpt = host->syncpt;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_lock(&host->intr_mutex);

        host1x_hw_intr_disable_all_syncpt_intrs(host);

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next,
                        &syncpt[id].intr.wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                            WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt[id].intr.wait_head)) {
                        /* output diagnostics */
                        mutex_unlock(&host->intr_mutex);
                        pr_warn("%s cannot stop syncpt intr id=%u\n",
                                __func__, id);
                        return;
                }
        }

        host1x_hw_intr_free_syncpt_irq(host);

        mutex_unlock(&host->intr_mutex);
}