linux/drivers/gpu/drm/nouveau/nvkm/core/event.c
/*
 * Copyright 2013-2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/event.h>
#include <core/notify.h>

/*
 * Drop a reference on each of the specified event types for one index.
 * When a type's refcount reaches zero, ask the provider to disable the
 * corresponding interrupt source.  Caller must hold event->refs_lock.
 */
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (--event->refs[index * event->types_nr + type] == 0) {
			if (event->func->fini)
				event->func->fini(event, 1 << type, index);
		}
	}
}

/*
 * Take a reference on each of the specified event types for one index.
 * The first reference on a type asks the provider to enable the
 * corresponding interrupt source.  Caller must hold event->refs_lock.
 */
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (++event->refs[index * event->types_nr + type] == 1) {
			if (event->func->init)
				event->func->init(event, 1 << type, index);
		}
	}
}

/*
 * Dispatch an event to every notifier registered for the given index whose
 * type mask overlaps the types being signalled.  A provider may override
 * delivery with a custom send() hook.
 */
void
nvkm_event_send(struct nvkm_event *event, u32 types, int index,
		void *data, u32 size)
{
	struct nvkm_notify *notify;
	unsigned long flags;

	if (!event->refs || WARN_ON(index >= event->index_nr))
		return;

	spin_lock_irqsave(&event->list_lock, flags);
	list_for_each_entry(notify, &event->list, head) {
		if (notify->index == index && (notify->types & types)) {
			if (event->func->send) {
				event->func->send(data, size, notify);
				continue;
			}
			nvkm_notify_send(notify, data, size);
		}
	}
	spin_unlock_irqrestore(&event->list_lock, flags);
}

/* Release the refcount array allocated by nvkm_event_init(). */
void
nvkm_event_fini(struct nvkm_event *event)
{
	if (event->refs) {
		kfree(event->refs);
		event->refs = NULL;
	}
}

/*
 * Initialise an event source: allocate one refcount per (index, type) pair
 * and set up the notifier list and its locks.
 */
int
nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
		struct nvkm_event *event)
{
	event->refs = kzalloc(array3_size(index_nr, types_nr,
					  sizeof(*event->refs)),
			      GFP_KERNEL);
	if (!event->refs)
		return -ENOMEM;

	event->func = func;
	event->types_nr = types_nr;
	event->index_nr = index_nr;
	spin_lock_init(&event->refs_lock);
	spin_lock_init(&event->list_lock);
	INIT_LIST_HEAD(&event->list);
	return 0;
}
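/*
 * Usage sketch (illustrative only, not part of the original file): a
 * provider embeds an nvkm_event, supplies init()/fini() hooks that
 * enable/disable its interrupt source, and calls nvkm_event_send() from
 * its interrupt handler.  All "example_*" names are hypothetical; the
 * hook signatures are inferred from the calls made above.
 */
#if 0
struct example {
	struct nvkm_event event;
	/* provider-specific state would live here */
};

static void
example_event_init(struct nvkm_event *event, int type, int index)
{
	/* hypothetical: unmask the hardware interrupt for this index */
}

static void
example_event_fini(struct nvkm_event *event, int type, int index)
{
	/* hypothetical: mask the hardware interrupt for this index */
}

static const struct nvkm_event_func
example_event_func = {
	.init = example_event_init,
	.fini = example_event_fini,
};

static int
example_ctor(struct example *ex)
{
	/* one event type, eight independent indices (e.g. channels) */
	return nvkm_event_init(&example_event_func, 1, 8, &ex->event);
}

static void
example_intr(struct example *ex, int index)
{
	/* wake any notifiers registered for this index, no payload */
	nvkm_event_send(&ex->event, 1, index, NULL, 0);
}
#endif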