linux/drivers/gpu/drm/nouveau/core/core/event.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <core/os.h>
#include <core/event.h>

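/*
 * nouveau_event_put() - disable delivery for an event handler.
 *
 * Clears the handler's enable bit; if it was set, drops one reference on
 * each event type the handler listens to and, when a type's refcount hits
 * zero, calls the event's disable() hook for that type/index.
 */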
void
nouveau_event_put(struct nouveau_eventh *handler)
{
        struct nouveau_event *event = handler->event;
        unsigned long flags;
        u32 m, t;

        if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
                return;

        spin_lock_irqsave(&event->refs_lock, flags);
        for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
                if (!--event->refs[handler->index * event->types_nr + t]) {
                        if (event->disable)
                                event->disable(event, 1 << t, handler->index);
                }
        }
        spin_unlock_irqrestore(&event->refs_lock, flags);
}

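/*
 * nouveau_event_get() - enable delivery for an event handler.
 *
 * Sets the handler's enable bit; if it was previously clear, takes one
 * reference on each event type the handler listens to and, on the first
 * reference for a type, calls the event's enable() hook for that type/index.
 */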
void
nouveau_event_get(struct nouveau_eventh *handler)
{
        struct nouveau_event *event = handler->event;
        unsigned long flags;
        u32 m, t;

        if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
                return;

        spin_lock_irqsave(&event->refs_lock, flags);
        for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
                if (!event->refs[handler->index * event->types_nr + t]++) {
                        if (event->enable)
                                event->enable(event, 1 << t, handler->index);
                }
        }
        spin_unlock_irqrestore(&event->refs_lock, flags);
}

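/*
 * nouveau_event_fini() - disable a handler and unlink it from its event.
 */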
static void
nouveau_event_fini(struct nouveau_eventh *handler)
{
        struct nouveau_event *event = handler->event;
        unsigned long flags;

        nouveau_event_put(handler);
        spin_lock_irqsave(&event->list_lock, flags);
        list_del(&handler->head);
        spin_unlock_irqrestore(&event->list_lock, flags);
}

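/*
 * nouveau_event_init() - initialise a handler and add it to an event's
 * notification list.
 *
 * Validates the requested type mask and index against the event before
 * filling in the handler and linking it onto event->list[index].
 */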
static int
nouveau_event_init(struct nouveau_event *event, u32 types, int index,
                   int (*func)(void *, u32, int), void *priv,
                   struct nouveau_eventh *handler)
{
        unsigned long flags;

        if (types & ~((1 << event->types_nr) - 1))
                return -EINVAL;
        if (index >= event->index_nr)
                return -EINVAL;

        handler->event = event;
        handler->flags = 0;
        handler->types = types;
        handler->index = index;
        handler->func = func;
        handler->priv = priv;

        spin_lock_irqsave(&event->list_lock, flags);
        list_add_tail(&handler->head, &event->list[index]);
        spin_unlock_irqrestore(&event->list_lock, flags);
        return 0;
}

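/*
 * nouveau_event_new() - allocate and register a new event handler.
 *
 * Runs the event's optional check() hook, then allocates a handler and
 * registers it for the given type mask and index.  The handler starts out
 * disabled; callers enable delivery with nouveau_event_get() and release
 * the handler with nouveau_event_ref(NULL, &handler).
 */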
int
nouveau_event_new(struct nouveau_event *event, u32 types, int index,
                  int (*func)(void *, u32, int), void *priv,
                  struct nouveau_eventh **phandler)
{
        struct nouveau_eventh *handler;
        int ret = -ENOMEM;

        if (event->check) {
                ret = event->check(event, types, index);
                if (ret)
                        return ret;
        }

        handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
        if (handler) {
                ret = nouveau_event_init(event, types, index, func, priv, handler);
                if (ret)
                        kfree(handler);
        }

        return ret;
}

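/*
 * nouveau_event_ref() - release a handler reference.
 *
 * Only dropping references is supported: @handler must be NULL.  Any
 * existing handler in @ref is torn down, freed, and *ref is cleared.
 */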
void
nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
{
        BUG_ON(handler != NULL);
        if (*ref) {
                nouveau_event_fini(*ref);
                kfree(*ref);
        }
        *ref = handler;
}

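/*
 * nouveau_event_trigger() - dispatch an event to all interested handlers.
 *
 * Walks the handlers registered for @index and calls each enabled handler
 * whose type mask intersects @types.  A handler returning NVKM_EVENT_DROP
 * is disabled via nouveau_event_put().
 */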
void
nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
{
        struct nouveau_eventh *handler;
        unsigned long flags;

        if (WARN_ON(index >= event->index_nr))
                return;

        spin_lock_irqsave(&event->list_lock, flags);
        list_for_each_entry(handler, &event->list[index], head) {
                if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
                        continue;
                if (!(handler->types & types))
                        continue;
                if (handler->func(handler->priv, handler->types & types, index)
                                != NVKM_EVENT_DROP)
                        continue;
                nouveau_event_put(handler);
        }
        spin_unlock_irqrestore(&event->list_lock, flags);
}

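/*
 * nouveau_event_destroy() - free an event and clear the caller's pointer.
 */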
void
nouveau_event_destroy(struct nouveau_event **pevent)
{
        struct nouveau_event *event = *pevent;
        if (event) {
                /* event->list is a separate allocation; free it as well */
                kfree(event->list);
                kfree(event);
                *pevent = NULL;
        }
}

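/*
 * nouveau_event_create() - allocate and initialise an event.
 *
 * Allocates the event together with a refs[] array of index_nr * types_nr
 * counters, plus a separate array of per-index handler lists, and
 * initialises the locks and list heads.
 */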
int
nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
{
        struct nouveau_event *event;
        int i;

        event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
                                  sizeof(event->refs[0]), GFP_KERNEL);
        if (!event)
                return -ENOMEM;

        event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
        if (!event->list) {
                kfree(event);
                return -ENOMEM;
        }

        spin_lock_init(&event->list_lock);
        spin_lock_init(&event->refs_lock);
        for (i = 0; i < index_nr; i++)
                INIT_LIST_HEAD(&event->list[i]);
        event->types_nr = types_nr;
        event->index_nr = index_nr;
        return 0;
}