#include <core/notify.h>
#include <core/event.h>
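
/*
 * Event notification glue: a struct nvkm_notify binds a user callback to a
 * struct nvkm_event source.  notify->block counts reasons delivery is
 * disabled; the underlying event is only armed (nvkm_event_get()) while the
 * count is zero.  event->refs_lock guards the block count, event->list_lock
 * guards the per-event notifier list.
 */

/*
 * Take a block reference.  On the 0->1 transition, drop our interest in the
 * event source so it can be disabled.  Caller holds event->refs_lock.
 */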
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
{
	if (notify->block++ == 0)
		nvkm_event_put(notify->event, notify->types, notify->index);
}
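
/*
 * Disable notifications on behalf of the user: consume NVKM_NOTIFY_USER,
 * take a block reference and, for work-mode notifiers, flush any handler
 * still running on the workqueue.  Safe to call repeatedly.
 */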
void
nvkm_notify_put(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;
	if (likely(event) &&
	    test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_put_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
		if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
			flush_work(&notify->work);
	}
}
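
/*
 * Drop a block reference.  On the 1->0 transition, express interest in the
 * event source so it gets enabled.  Caller holds event->refs_lock.
 */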
static inline void
nvkm_notify_get_locked(struct nvkm_notify *notify)
{
	if (--notify->block == 0)
		nvkm_event_get(notify->event, notify->types, notify->index);
}
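
/*
 * Enable notifications on behalf of the user: set NVKM_NOTIFY_USER and drop
 * the block reference it represents.  A no-op if already enabled.
 */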
void
nvkm_notify_get(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;
	if (likely(event) &&
	    !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}
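
/*
 * Call the user's handler for a delivered event.  Delivery was blocked in
 * nvkm_notify_send(), so re-arm when the handler returns NVKM_NOTIFY_KEEP.
 * Otherwise the USER flag is consumed, turning a later nvkm_notify_put()
 * into a no-op; if the user already put the notify while the handler ran,
 * the extra block reference their put took is dropped here instead.
 */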
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}
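
/*
 * Work-mode trampoline: runs the handler in process context instead of the
 * sender's (list_lock-held) context.
 */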
static void
nvkm_notify_work(struct work_struct *work)
{
	struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
	nvkm_notify_func(notify);
}
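
/*
 * Deliver an event to one notifier; called by the event source with
 * event->list_lock held.  Delivery is blocked before dispatch so each
 * trigger fires at most once until re-armed.  Work-mode notifiers get the
 * payload copied into their own buffer and run later; otherwise the handler
 * runs inline, borrowing the sender's payload for the duration of the call.
 */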
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	assert_spin_locked(&event->list_lock);
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		memcpy((void *)notify->data, data, size);
		schedule_work(&notify->work);
	} else {
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}
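
/*
 * Destroy a notifier: disable it, unlink it from the event's list and free
 * the payload buffer (only non-NULL for work-mode notifiers).
 */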
void
nvkm_notify_fini(struct nvkm_notify *notify)
{
	unsigned long flags;
	if (notify->event) {
		nvkm_notify_put(notify);
		spin_lock_irqsave(&notify->event->list_lock, flags);
		list_del(&notify->head);
		spin_unlock_irqrestore(&notify->event->list_lock, flags);
		kfree((void *)notify->data);
		notify->event = NULL;
	}
}
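
/*
 * Create a notifier on an event source.  The event's ctor validates the
 * request in data/size and fills in notify->types/index/size; "reply" must
 * match the payload size the event will deliver.  With work == true the
 * handler runs from a workqueue and a payload buffer is allocated here.
 * Notifiers start blocked; call nvkm_notify_get() to enable delivery.
 */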
int
nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event,
		 int (*func)(struct nvkm_notify *), bool work,
		 void *data, u32 size, u32 reply,
		 struct nvkm_notify *notify)
{
	unsigned long flags;
	int ret = -ENODEV;
	if ((notify->event = event), event->refs) {
		ret = event->func->ctor(object, data, size, notify);
		if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
			notify->flags = 0;
			notify->block = 1;
			notify->func = func;
			notify->data = NULL;
			if (ret = 0, work) {
				INIT_WORK(&notify->work, nvkm_notify_work);
				set_bit(NVKM_NOTIFY_WORK, &notify->flags);
				notify->data = kmalloc(reply, GFP_KERNEL);
				if (!notify->data)
					ret = -ENOMEM;
			}
		}
		if (ret == 0) {
			spin_lock_irqsave(&event->list_lock, flags);
			list_add_tail(&notify->head, &event->list);
			spin_unlock_irqrestore(&event->list_lock, flags);
		}
	}
	if (ret)
		notify->event = NULL;
	return ret;
}
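
/*
 * Illustrative usage only (my_handler, my_args and my_reply are made-up
 * names, not part of this API).  A typical client builds the notifier once,
 * enables it, and lets the handler decide whether to stay armed:
 *
 *	static int
 *	my_handler(struct nvkm_notify *notify)
 *	{
 *		const struct my_reply *reply = notify->data;
 *		// ... consume *reply ...
 *		return NVKM_NOTIFY_KEEP;	// or NVKM_NOTIFY_DROP
 *	}
 *
 *	ret = nvkm_notify_init(object, event, my_handler, true,
 *			       &my_args, sizeof(my_args),
 *			       sizeof(struct my_reply), &notify);
 *	if (ret == 0)
 *		nvkm_notify_get(&notify);	// arm
 *	...
 *	nvkm_notify_fini(&notify);		// disarm, unlink, free
 */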