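/*
 * Ultra Wide Band radio controller: event size table (EST) management.
 *
 * Radio controller notifications and command confirmations arrive as
 * variable-sized blobs that start with a struct uwb_rceb header.  To
 * split a buffer into individual events, the core has to know how big
 * each event is.  This file maintains a small global array of event
 * size tables: for a given event type, high byte of the event code,
 * vendor and product, a table indexed by the low byte of the event
 * code gives either a fixed size or the location of an embedded length
 * field from which the total size is computed.
 *
 * uwb_est_register() and uwb_est_unregister() add and remove tables
 * (for instance, vendor-specific ones); uwb_est_find_size() is used by
 * the event-handling path to size the next event in a buffer.
 */
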
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "uwb-internal.h"

struct uwb_est {
        u16 type_event_high;    /* bEventType << 8 | high byte of wEvent */
        u16 vendor, product;
        u8 entries;             /* number of elements in @entry */
        const struct uwb_est_entry *entry;
};

static struct uwb_est *uwb_est;
static u8 uwb_est_size;
static u8 uwb_est_used;
static DEFINE_RWLOCK(uwb_est_lock);
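
/*
 * Event size table for the standard (UWB_RC_CET_GENERAL) command and
 * event set, indexed by the low byte of the event/command code.
 *
 * .size is the minimum size of the event; for events that carry a
 * variable-sized payload, .offset is 1 + the offset of the length
 * field inside the event structure (0 means there is no extra length
 * field).
 */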
static
struct uwb_est_entry uwb_est_00_00xx[] = {
        [UWB_RC_EVT_IE_RCV] = {
                .size = sizeof(struct uwb_rc_evt_ie_rcv),
                .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
        },
        [UWB_RC_EVT_BEACON] = {
                .size = sizeof(struct uwb_rc_evt_beacon),
                .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
        },
        [UWB_RC_EVT_BEACON_SIZE] = {
                .size = sizeof(struct uwb_rc_evt_beacon_size),
        },
        [UWB_RC_EVT_BPOIE_CHANGE] = {
                .size = sizeof(struct uwb_rc_evt_bpoie_change),
                .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
                                       wBPOIELength),
        },
        [UWB_RC_EVT_BP_SLOT_CHANGE] = {
                .size = sizeof(struct uwb_rc_evt_bp_slot_change),
        },
        [UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
                .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
                .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
        },
        [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
                .size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
        },
        [UWB_RC_EVT_DRP_AVAIL] = {
                .size = sizeof(struct uwb_rc_evt_drp_avail),
        },
        [UWB_RC_EVT_DRP] = {
                .size = sizeof(struct uwb_rc_evt_drp),
                .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
        },
        [UWB_RC_EVT_BP_SWITCH_STATUS] = {
                .size = sizeof(struct uwb_rc_evt_bp_switch_status),
        },
        [UWB_RC_EVT_CMD_FRAME_RCV] = {
                .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
                .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
        },
        [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
                .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
                .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
        },
        [UWB_RC_CMD_CHANNEL_CHANGE] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_DEV_ADDR_MGMT] = {
                .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt),
        },
        [UWB_RC_CMD_GET_IE] = {
                .size = sizeof(struct uwb_rc_evt_get_ie),
                .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
        },
        [UWB_RC_CMD_RESET] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SCAN] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SET_BEACON_FILTER] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SET_DRP_IE] = {
                .size = sizeof(struct uwb_rc_evt_set_drp_ie),
        },
        [UWB_RC_CMD_SET_IE] = {
                .size = sizeof(struct uwb_rc_evt_set_ie),
        },
        [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SET_TX_POWER] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SLEEP] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_START_BEACON] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_STOP_BEACON] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_BP_MERGE] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SEND_COMMAND_FRAME] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
        [UWB_RC_CMD_SET_ASIE_NOTIF] = {
                .size = sizeof(struct uwb_rc_evt_confirm),
        },
};
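
/*
 * Event size table for the UWB_RC_CET_EX_TYPE_1 (extended type 1)
 * command and event set.
 */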
static
struct uwb_est_entry uwb_est_01_00xx[] = {
        [UWB_RC_DAA_ENERGY_DETECTED] = {
                .size = sizeof(struct uwb_rc_evt_daa_energy_detected),
        },
        [UWB_RC_SET_DAA_ENERGY_MASK] = {
                .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
        },
        [UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
                .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
        },
};
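
/*
 * Allocate the global EST array and register the built-in tables.
 *
 * Returns 0 on success or a negative error code if the allocation or
 * one of the initial registrations fails.
 */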
int uwb_est_create(void)
{
        int result;

        uwb_est_size = 2;
        uwb_est_used = 0;
        uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
        if (uwb_est == NULL)
                return -ENOMEM;

        result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
                                  uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
        if (result < 0)
                goto out;
        result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
                                  uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
out:
        return result;
}
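
/* Release the global EST array allocated by uwb_est_create(). */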
void uwb_est_destroy(void)
{
        kfree(uwb_est);
        uwb_est = NULL;
        uwb_est_size = uwb_est_used = 0;
}
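
/*
 * Double the capacity of the global EST array.
 *
 * Called with uwb_est_lock held for writing, hence GFP_ATOMIC.  The
 * old entries are copied over, the new half is zeroed and the old
 * array is freed.  Returns 0 or -ENOMEM.
 */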
static
int uwb_est_grow(void)
{
        size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
        void *new = kmalloc(2 * actual_size, GFP_ATOMIC);

        if (new == NULL)
                return -ENOMEM;
        memcpy(new, uwb_est, actual_size);
        memset(new + actual_size, 0, actual_size);
        kfree(uwb_est);
        uwb_est = new;
        uwb_est_size *= 2;
        return 0;
}
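
/**
 * uwb_est_register - register an event size table
 * @type:       event type the table covers (e.g. UWB_RC_CET_GENERAL)
 * @event_high: high byte of the event codes described by @entry
 * @vendor:     vendor ID the table is registered for (the built-in
 *              tables pass 0xffff)
 * @product:    product ID the table is registered for (the built-in
 *              tables pass 0xffff)
 * @entry:      array of size entries, indexed by the low byte of the
 *              event code
 * @entries:    number of elements in @entry
 *
 * Inserts the table into the global array, growing the array if it is
 * full.  Returns 0 on success or a negative error code.
 *
 * For illustration only (the foo_* names, structure and IDs below are
 * hypothetical, not part of this API):
 *
 *      static struct uwb_est_entry foo_est[] = {
 *              [FOO_EVT_STATUS] = { .size = sizeof(struct foo_evt_status) },
 *      };
 *
 *      result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0x1234, 0x5678,
 *                                foo_est, ARRAY_SIZE(foo_est));
 */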
int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
                     const struct uwb_est_entry *entry, size_t entries)
{
        unsigned long flags;
        unsigned itr;
        int result = 0;

        write_lock_irqsave(&uwb_est_lock, flags);
        if (uwb_est_used == uwb_est_size) {
                result = uwb_est_grow();
                if (result < 0)
                        goto out;
        }
        /* Find the spot to insert the new table at */
        for (itr = 0; itr < uwb_est_used; itr++)
                if (uwb_est[itr].type_event_high < type
                    && uwb_est[itr].vendor < vendor
                    && uwb_est[itr].product < product)
                        break;

        /* Shift the following entries to make room for the new one */
        if (itr < uwb_est_used)
                memmove(&uwb_est[itr + 1], &uwb_est[itr],
                        (uwb_est_used - itr) * sizeof(uwb_est[0]));
        uwb_est[itr].type_event_high = type << 8 | event_high;
        uwb_est[itr].vendor = vendor;
        uwb_est[itr].product = product;
        uwb_est[itr].entry = entry;
        uwb_est[itr].entries = entries;
        uwb_est_used++;
out:
        write_unlock_irqrestore(&uwb_est_lock, flags);
        return result;
}
EXPORT_SYMBOL_GPL(uwb_est_register);
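
/*
 * uwb_est_unregister - remove a previously registered event size table.
 *
 * The arguments must match those passed to uwb_est_register(); returns
 * 0 if a matching table was found and removed, -ENOENT otherwise.
 */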
int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
                       const struct uwb_est_entry *entry, size_t entries)
{
        unsigned long flags;
        unsigned itr;
        struct uwb_est est_cmp = {
                .type_event_high = type << 8 | event_high,
                .vendor = vendor,
                .product = product,
                .entry = entry,
                .entries = entries
        };

        write_lock_irqsave(&uwb_est_lock, flags);
        for (itr = 0; itr < uwb_est_used; itr++)
                if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
                        goto found;
        write_unlock_irqrestore(&uwb_est_lock, flags);
        return -ENOENT;

found:
        /* Close the gap left by the removed entry */
        if (itr < uwb_est_used - 1)
                memmove(&uwb_est[itr], &uwb_est[itr + 1],
                        (uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
        uwb_est_used--;
        write_unlock_irqrestore(&uwb_est_lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(uwb_est_unregister);
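
/*
 * Compute the size of the event in @rceb according to the table @est.
 *
 * @event_low indexes @est->entry.  If the entry's .offset is non-zero,
 * the event carries a length field at byte offset (.offset - 1); its
 * value (8 or 16 bit little endian, per .type) is added to the base
 * .size.  Returns the total size in bytes, -ENOENT if the table does
 * not describe the event, or -ENOSPC if @rceb_size is too small to
 * contain the length field.
 */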
static
ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
                         u8 event_low, const struct uwb_rceb *rceb,
                         size_t rceb_size)
{
        unsigned offset;
        ssize_t size;
        struct device *dev = &uwb_rc->uwb_dev.dev;
        const struct uwb_est_entry *entry;

        size = -ENOENT;
        if (event_low >= est->entries) {
                dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
                        est, est->type_event_high, est->vendor, est->product,
                        est->entries, event_low);
                goto out;
        }
        size = -ENOENT;
        entry = &est->entry[event_low];
        if (entry->size == 0 && entry->offset == 0) {
                dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
                        est, est->type_event_high, est->vendor, est->product,
                        est->entries, event_low);
                goto out;
        }
        offset = entry->offset;
        if (offset == 0)
                size = entry->size;
        else {
                /*
                 * The event carries an extra length field; read it and
                 * add it to the base size.
                 */
                const void *ptr = rceb;
                size_t type_size = 0;

                offset--;
                size = -ENOSPC;
                switch (entry->type) {
                case UWB_EST_16: type_size = sizeof(__le16); break;
                case UWB_EST_8:  type_size = sizeof(u8); break;
                default: BUG();
                }
                if (offset + type_size > rceb_size) {
                        dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
                                "not enough data to read extra size\n",
                                est, est->type_event_high, est->vendor,
                                est->product, est->entries);
                        goto out;
                }
                size = entry->size;
                ptr += offset;
                switch (entry->type) {
                case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
                case UWB_EST_8:  size += *(u8 *)ptr; break;
                default: BUG();
                }
        }
out:
        return size;
}
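
/**
 * uwb_est_find_size - compute the size of a radio controller event
 * @rc:        radio controller that generated the event
 * @rceb:      buffer that starts with the event's struct uwb_rceb header
 * @rceb_size: number of valid bytes available at @rceb
 *
 * Walks the registered event size tables that match the event's type
 * and the high byte of its event code and asks each one for the size.
 * Returns the event size in bytes, -ENOSPC if @rceb_size cannot even
 * hold a struct uwb_rceb (or is too small to read an embedded length
 * field), or -ENOENT if no registered table describes the event.
 *
 * A caller looping over a buffer of concatenated events would do
 * roughly the following (buf, buf_size and handle_event() are
 * illustrative, not part of this API):
 *
 *      size = uwb_est_find_size(rc, (struct uwb_rceb *)buf, buf_size);
 *      if (size < 0 || size > buf_size)
 *              break;
 *      handle_event(buf, size);
 *      buf += size;
 *      buf_size -= size;
 */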
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
                          size_t rceb_size)
{
        ssize_t size;
        struct device *dev = &rc->uwb_dev.dev;
        unsigned long flags;
        unsigned itr;
        u16 type_event_high, event;

        read_lock_irqsave(&uwb_est_lock, flags);
        size = -ENOSPC;
        if (rceb_size < sizeof(*rceb))
                goto out;
        event = le16_to_cpu(rceb->wEvent);
        type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
        for (itr = 0; itr < uwb_est_used; itr++) {
                if (uwb_est[itr].type_event_high != type_event_high)
                        continue;
                size = uwb_est_get_size(rc, &uwb_est[itr],
                                        event & 0x00ff, rceb, rceb_size);
                /* try further tables only if this one doesn't know the event */
                if (size != -ENOENT)
                        goto out;
        }
        dev_dbg(dev,
                "event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
                (unsigned) rceb->bEventType,
                (unsigned) le16_to_cpu(rceb->wEvent),
                (unsigned) rceb->bEventContext,
                rceb);
        size = -ENOENT;
out:
        read_unlock_irqrestore(&uwb_est_lock, flags);
        return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);