/*
 * Ultra Wide Band Radio Controller
 * Event Size Tables management
 *
 * Infrastructure and data tables for guessing the size of events
 * received on the notification endpoints of UWB radio controllers.
 *
 * A table of entries is registered per event type and high event
 * byte; each entry gives the fixed size of an event and, for
 * variable-sized events, the offset of the length field used to
 * compute the extra size.
 *
 * Entry points:
 *
 * uwb_est_create()/uwb_est_destroy(): initialize/release the EST
 *     subsystem.
 *
 * uwb_est_register()/uwb_est_unregister(): add/remove event size
 *     tables (uwb_est_grow() makes room when needed).
 *
 * uwb_est_find_size(): compute the size of an event, using
 *     uwb_est_get_size() on each matching table.
 */
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "uwb-internal.h"

struct uwb_est {
	u16 type_event_high;
	u16 vendor, product;
	u8 entries;
	const struct uwb_est_entry *entry;
};

static struct uwb_est *uwb_est;
static u8 uwb_est_size;
static u8 uwb_est_used;
static DEFINE_RWLOCK(uwb_est_lock);

/*
 * Event size table for general events and command confirmations
 * (type UWB_RC_CET_GENERAL, high event byte 0x00).
 */
static
struct uwb_est_entry uwb_est_00_00xx[] = {
	[UWB_RC_EVT_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_beacon),
		.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.size = sizeof(struct uwb_rc_evt_beacon_size),
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bpoie_change),
		.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
				       wBPOIELength),
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bp_slot_change),
	},
	[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.size = sizeof(struct uwb_rc_evt_drp_avail),
	},
	[UWB_RC_EVT_DRP] = {
		.size = sizeof(struct uwb_rc_evt_drp),
		.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
	},
	[UWB_RC_EVT_BP_SWITCH_STATUS] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_status),
	},
	[UWB_RC_EVT_CMD_FRAME_RCV] = {
		.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
	},
	[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
	},
	[UWB_RC_CMD_CHANNEL_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_DEV_ADDR_MGMT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt),
	},
	[UWB_RC_CMD_GET_IE] = {
		.size = sizeof(struct uwb_rc_evt_get_ie),
		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
	},
	[UWB_RC_CMD_RESET] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SCAN] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_BEACON_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_DRP_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_drp_ie),
	},
	[UWB_RC_CMD_SET_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_ie),
	},
	[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_TX_POWER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SLEEP] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_START_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_STOP_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_BP_MERGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_ASIE_NOTIF] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
};

/*
 * Event size table for extended type-1 events and command
 * confirmations (type UWB_RC_CET_EX_TYPE_1, high event byte 0x00).
 */
static
struct uwb_est_entry uwb_est_01_00xx[] = {
	[UWB_RC_DAA_ENERGY_DETECTED] = {
		.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
	},
	[UWB_RC_SET_DAA_ENERGY_MASK] = {
		.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
	},
	[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
		.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
	},
};

/**
 * uwb_est_create - initialize the EST subsystem
 *
 * Allocates the (growable) array of registered event size tables and
 * registers the two standard tables defined above.
 *
 * Returns 0 on success, negative errno on failure.
 */
int uwb_est_create(void)
{
	int result;

	uwb_est_size = 2;
	uwb_est_used = 0;
	uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
	if (uwb_est == NULL)
		return -ENOMEM;
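	/* Register the standard tables: general and extended type-1 events */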
	result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
				  uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
	if (result < 0)
		goto out;
	result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
				  uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
out:
	return result;
}

/**
 * uwb_est_destroy - release the table allocated by uwb_est_create()
 */
void uwb_est_destroy(void)
{
	kfree(uwb_est);
	uwb_est = NULL;
	uwb_est_size = uwb_est_used = 0;
}

/**
 * uwb_est_grow - double the capacity of the event size table array
 *
 * Returns 0 on success, -ENOMEM if the bigger array cannot be
 * allocated. Called with uwb_est_lock held for writing.
 */
static
int uwb_est_grow(void)
{
	size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
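	/* called under the EST write lock, hence the atomic allocation */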
	void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
	if (new == NULL)
		return -ENOMEM;
	memcpy(new, uwb_est, actual_size);
	memset(new + actual_size, 0, actual_size);
	kfree(uwb_est);
	uwb_est = new;
	uwb_est_size *= 2;
	return 0;
}

/**
 * uwb_est_register - register an event size table
 * @type:       event type (e.g. UWB_RC_CET_GENERAL)
 * @event_high: high byte of the event code
 * @vendor:     vendor code the table applies to (0xffff for any)
 * @product:    product code the table applies to (0xffff for any)
 * @entry:      array of size entries, indexed by the low event byte
 * @entries:    number of elements in @entry
 *
 * Grows the table array if it is full and inserts the new table,
 * keeping the array ordered by type/event_high, vendor and product.
 *
 * Returns 0 on success, negative errno on error.
 */
int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
		     const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	u16 type_event_high;
	int result = 0;

	write_lock_irqsave(&uwb_est_lock, flags);
	if (uwb_est_used == uwb_est_size) {
		result = uwb_est_grow();
		if (result < 0)
			goto out;
	}
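	/* Find the spot to insert the new table at (the array is kept sorted) */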
	type_event_high = type << 8 | event_high;
	for (itr = 0; itr < uwb_est_used; itr++)
		if (uwb_est[itr].type_event_high < type_event_high
		    && uwb_est[itr].vendor < vendor
		    && uwb_est[itr].product < product)
			break;
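	/* Shift the following entries to make room for the new one */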
	if (itr < uwb_est_used)
		memmove(&uwb_est[itr+1], &uwb_est[itr],
			(uwb_est_used - itr) * sizeof(uwb_est[0]));
	uwb_est[itr].type_event_high = type_event_high;
	uwb_est[itr].vendor = vendor;
	uwb_est[itr].product = product;
	uwb_est[itr].entry = entry;
	uwb_est[itr].entries = entries;
	uwb_est_used++;
out:
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return result;
}
EXPORT_SYMBOL_GPL(uwb_est_register);

/**
 * uwb_est_unregister - remove a previously registered event size table
 *
 * The arguments must match those passed to uwb_est_register(); the
 * matching entry is removed and the entries after it are shifted down.
 *
 * Returns 0 on success, -ENOENT if no matching table is found.
 */
int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
		       const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	struct uwb_est est_cmp = {
		.type_event_high = type << 8 | event_high,
		.vendor = vendor,
		.product = product,
		.entry = entry,
		.entries = entries
	};

	write_lock_irqsave(&uwb_est_lock, flags);
	for (itr = 0; itr < uwb_est_used; itr++)
		if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
			goto found;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return -ENOENT;

found:
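	/* close the gap left by the removed entry */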
	if (itr < uwb_est_used - 1)
		memmove(&uwb_est[itr], &uwb_est[itr+1],
			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
	uwb_est_used--;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(uwb_est_unregister);

/**
 * uwb_est_get_size - look up the size of an event in one table
 * @uwb_rc:    radio controller that received the event (for messages)
 * @est:       event size table to look in
 * @event_low: low byte of the event code, used as the table index
 * @rceb:      buffer with the event
 * @rceb_size: number of bytes available at @rceb
 *
 * Returns the total event size; -ENOENT if the table has no entry for
 * the event, -ENOSPC if the buffer is too short to read the length
 * field of a variable-sized event.
 */
static
ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
			 u8 event_low, const struct uwb_rceb *rceb,
			 size_t rceb_size)
{
	unsigned offset;
	ssize_t size;
	struct device *dev = &uwb_rc->uwb_dev.dev;
	const struct uwb_est_entry *entry;

	size = -ENOENT;
	if (event_low >= est->entries) {
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	entry = &est->entry[event_low];
	if (entry->size == 0 && entry->offset == 0) {
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	offset = entry->offset;
	if (offset == 0)
		size = entry->size;
	else {
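		/* variable-sized event: add the value of its length field */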
		const void *ptr = rceb;
		size_t type_size = 0;

		offset--;
		size = -ENOSPC;
		switch (entry->type) {
		case UWB_EST_16: type_size = sizeof(__le16); break;
		case UWB_EST_8: type_size = sizeof(u8); break;
		default: BUG();
		}
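		/* the length field itself must lie within the buffer we were given */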
		if (offset + type_size > rceb_size) {
			dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: not enough data to read extra size\n",
				est, est->type_event_high, est->vendor,
				est->product, est->entries);
			goto out;
		}
		size = entry->size;
		ptr += offset;
		switch (entry->type) {
		case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
		case UWB_EST_8: size += *(u8 *)ptr; break;
		default: BUG();
		}
	}
out:
	return size;
}

/**
 * uwb_est_find_size - guess the total size of a received event
 * @rc:        radio controller that received the event
 * @rceb:      buffer with the event
 * @rceb_size: number of bytes available at @rceb
 *
 * Walks the registered event size tables that match the event's type
 * and high event byte, asking each for the size via
 * uwb_est_get_size().
 *
 * Returns the event size; -ENOSPC if @rceb_size is too small to hold
 * the RCEB header or to read a variable length field, -ENOENT if no
 * registered table knows the event.
 */
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t rceb_size)
{
	ssize_t size;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long flags;
	unsigned itr;
	u16 type_event_high, event;
	const u8 *ptr = (const u8 *) rceb;

	read_lock_irqsave(&uwb_est_lock, flags);
	size = -ENOSPC;
	if (rceb_size < sizeof(*rceb))
		goto out;
	event = le16_to_cpu(rceb->wEvent);
	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
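	/* walk every table registered for this type and high event byte */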
	for (itr = 0; itr < uwb_est_used; itr++) {
		if (uwb_est[itr].type_event_high != type_event_high)
			continue;
		size = uwb_est_get_size(rc, &uwb_est[itr],
					event & 0x00ff, rceb, rceb_size);
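		/* on -ENOENT, another table for the same type may still know the event */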
		if (size != -ENOENT)
			goto out;
	}
	dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; RCEB %02x %02x %02x %02x\n",
		(unsigned) rceb->bEventType,
		(unsigned) le16_to_cpu(rceb->wEvent),
		(unsigned) rceb->bEventContext,
		ptr[0], ptr[1], ptr[2], ptr[3]);
	size = -ENOENT;
out:
	read_unlock_irqrestore(&uwb_est_lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);