1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <linux/compat.h>
27#include <linux/export.h>
28#include <linux/utsname.h>
29#include <linux/sched.h>
30#include <asm/unaligned.h>
31
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
34#include <net/bluetooth/hci_mon.h>
35#include <net/bluetooth/mgmt.h>
36
37#include "mgmt_util.h"
38
/* List of registered HCI management channels and the mutex guarding it */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* IDA handing out unique per-socket cookies used in monitor traces */
static DEFINE_IDA(sock_cookie_ida);

/* Number of sockets currently bound to the monitor channel; frames are
 * only mirrored to the monitor while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
46
47
48
/* Per-socket protocol info; bt_sock must stay the first member so the
 * cast from struct sock is valid.
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;		/* generic Bluetooth socket, must be first */
	struct hci_dev *hdev;		/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* per-socket RAW channel filter */
	__u8 cmsg_mask;			/* which HCI_CMSG_* ancillary data to deliver */
	unsigned short channel;		/* HCI_CHANNEL_* this socket is bound to */
	unsigned long flags;		/* HCI_SOCK_* / HCI_MGMT_* flag bits */
	__u32 cookie;			/* unique id used to tag monitor traces */
	char comm[TASK_COMM_LEN];	/* task name captured when cookie was assigned */
};
61
62void hci_sock_set_flag(struct sock *sk, int nr)
63{
64 set_bit(nr, &hci_pi(sk)->flags);
65}
66
67void hci_sock_clear_flag(struct sock *sk, int nr)
68{
69 clear_bit(nr, &hci_pi(sk)->flags);
70}
71
72int hci_sock_test_flag(struct sock *sk, int nr)
73{
74 return test_bit(nr, &hci_pi(sk)->flags);
75}
76
77unsigned short hci_sock_get_channel(struct sock *sk)
78{
79 return hci_pi(sk)->channel;
80}
81
82u32 hci_sock_get_cookie(struct sock *sk)
83{
84 return hci_pi(sk)->cookie;
85}
86
/* Assign a unique cookie to the socket on first use.
 *
 * Returns true when a new cookie was generated (first call for this
 * socket), false when the socket already had one.  The current task
 * name is captured at the same time for the monitor CTRL_OPEN trace.
 */
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;	/* sentinel when IDA allocation fails */

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
103
/* Release the socket's cookie back to the IDA, if one was assigned.
 * The cookie is overwritten with the invalid sentinel first.
 *
 * NOTE(review): if allocation previously failed the cookie already is
 * 0xffffffff and this still passes that value (as a negative int) to
 * ida_simple_remove() — confirm this path cannot be hit in practice.
 */
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
113
114static inline int hci_test_bit(int nr, const void *addr)
115{
116 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
117}
118
119
/* Security filter — bitmaps of packet types, events and commands.
 * Presumably applied to restrict unprivileged RAW sockets; the table is
 * consumed elsewhere in this file (not visible in this chunk).
 */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types (bitmask) */
	__u32 event_mask[2];	/* allowed HCI events (64-bit bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; /* allowed OCFs per OGF */
};
127
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
148
/* Global list of HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
152
/* Decide whether this frame must be withheld from a RAW socket
 * according to its per-socket filter.  Returns true to drop.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply packet-type filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter only applies to HCI events */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check opcode filter only when an opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
189
190
/* Send frame to RAW and USER channel sockets bound to hdev */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame back to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with one byte of headroom,
			 * shared by all receivers via clones below.
			 */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put the packet type byte in front of the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
254
255
/* Send frame to sockets with a specific channel and flag set.
 * Caller must hold hci_sk_list.lock (read).
 */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

}
289
/* Locked wrapper around __hci_send_to_channel() */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
297
298
/* Send frame to monitor socket(s), wrapped in a hci_mon_hdr */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Nothing to do when no monitor socket is bound */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map HCI packet type (and direction) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom for the monitor header */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
357
/* Mirror a management (control channel) event to the monitor channel,
 * one CTRL_EVENT message per matching control socket, tagged with that
 * socket's cookie.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 4 bytes cookie + 2 bytes event code + payload */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* hci_sk_list.lock is already held, use the lockless helper */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
411
/* Build a monitor message describing a controller life-cycle event.
 * Returns a freshly allocated skb, or NULL for unsupported events or
 * allocation failure.  Caller owns (and frees) the skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
489
/* Build a monitor CTRL_OPEN message announcing this socket.
 * Returns NULL when the socket has no cookie yet or its channel is not
 * traced.  Caller owns (and frees) the skb.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported formats */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
547
/* Build a monitor CTRL_CLOSE message for this socket.
 * Returns NULL when the socket has no cookie or its channel is not
 * traced.  Caller owns (and frees) the skb.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported formats */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
585
/* Build a monitor CTRL_COMMAND message mirroring a management command
 * sent by this socket.  Caller owns (and frees) the skb.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* 4 bytes cookie + 2 bytes opcode + command parameters */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
612
/* Queue a printf-formatted SYSTEM_NOTE message on a monitor socket.
 * The format is evaluated twice: once to size the buffer, once to
 * write it (hence the two va_start/va_end pairs).
 *
 * NOTE(review): vsnprintf's int return is stored into a size_t; a
 * negative return would become a huge allocation request which
 * bt_skb_alloc then fails — confirm that is the intended failure mode.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;	/* NUL terminator */
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
644
/* Replay the current controller state to a newly bound monitor socket:
 * one NEW_INDEX per registered controller, plus OPEN/UP/SETUP info for
 * controllers that are running.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only controllers that have been opened get index info */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
686
/* Replay a CTRL_OPEN message for every existing traced socket to a
 * newly bound monitor socket.
 */
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
706
707
/* Generate an internal stack event and deliver it to bound sockets as
 * a synthetic HCI event frame.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so RAW sockets accept it */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
733
/* Notify sockets about a controller life-cycle event: mirror it to the
 * monitor channel, emit a stack-internal event for RAW sockets, and on
 * unregister detach all sockets still bound to the device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device; drops the reference each
		 * bound socket holds on hdev.
		 *
		 * NOTE(review): lock_sock() can sleep and is called here
		 * while holding the hci_sk_list.lock read spinlock —
		 * confirm this combination is safe in this tree.
		 */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			lock_sock(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			release_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
779
/* Look up a registered management channel by number.
 * Caller must hold mgmt_chan_list_lock.
 */
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}
791
/* Locked wrapper around __hci_mgmt_chan_find() */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
802
/* Register a management channel.  Channel numbers below
 * HCI_CHANNEL_CONTROL are reserved; duplicate registrations are
 * rejected with -EALREADY.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
821
/* Unregister a previously registered management channel */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
829
/* Release an HCI socket: announce the close on the monitor channel,
 * unlink it, give back user-channel exclusive access if held, and drop
 * the controller reference.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}
894
895static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
896{
897 bdaddr_t bdaddr;
898 int err;
899
900 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
901 return -EFAULT;
902
903 hci_dev_lock(hdev);
904
905 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
906
907 hci_dev_unlock(hdev);
908
909 return err;
910}
911
912static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
913{
914 bdaddr_t bdaddr;
915 int err;
916
917 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
918 return -EFAULT;
919
920 hci_dev_lock(hdev);
921
922 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
923
924 hci_dev_unlock(hdev);
925
926 return err;
927}
928
929
/* Ioctls that require a bound, configured, primary controller.
 * Called with the socket lock held (from hci_sock_ioctl).
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* Controller is exclusively owned by a user channel socket */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported but kept privileged */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
972
/* Main ioctl entry point.  Only RAW channel sockets accept ioctls.
 * Device-independent commands run without the socket lock; anything
 * else is re-locked and handed to hci_sock_bound_ioctl().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed.  Ensure the cookie is created
	 * before sending the open notification since the cookie also
	 * identifies the socket in subsequent traces.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	/* Device-independent commands return directly (lock dropped) */
	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the bound device; re-take the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1066
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: commands taking a plain device id pass through
 * unchanged, everything else needs its pointer argument converted.
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
1082
/* Bind a socket to an HCI channel (and for RAW/USER, a controller).
 * Handles the per-channel permission checks, user-channel exclusive
 * access setup, and the monitor open/close trace notifications.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been
			 * assigned, an ioctl was issued against an unbound
			 * socket and triggered an open notification.
			 *
			 * Send a close notification first to allow the
			 * state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete controller */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the controller is being
		 * set up / configured or is already up (unless auto-off
		 * is pending).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been
			 * assigned, this socket will transition from a raw
			 * socket into a user channel socket.  For a clean
			 * transition, send the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events.  For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally.  Option, setting, class
		 * and local-name events are also enabled by default for
		 * the control channel; clients can opt out later.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already
				 * been assigned, this socket will transition
				 * from a raw socket into a control socket.
				 * For a clean transition, send the close
				 * notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1366
1367static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1368 int peer)
1369{
1370 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1371 struct sock *sk = sock->sk;
1372 struct hci_dev *hdev;
1373 int err = 0;
1374
1375 BT_DBG("sock %p sk %p", sock, sk);
1376
1377 if (peer)
1378 return -EOPNOTSUPP;
1379
1380 lock_sock(sk);
1381
1382 hdev = hci_pi(sk)->hdev;
1383 if (!hdev) {
1384 err = -EBADFD;
1385 goto done;
1386 }
1387
1388 haddr->hci_family = AF_BLUETOOTH;
1389 haddr->hci_dev = hdev->id;
1390 haddr->hci_channel= hci_pi(sk)->channel;
1391 err = sizeof(*haddr);
1392
1393done:
1394 release_sock(sk);
1395 return err;
1396}
1397
/* Attach requested ancillary data (direction, timestamp) to a received
 * message, honouring the per-socket cmsg mask.  The timestamp is
 * converted to the 32-bit layout for compat tasks.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1434
/* Receive one queued frame.  The logging channel is write-only; OOB is
 * not supported.  Short reads set MSG_TRUNC; with MSG_TRUNC requested,
 * the full frame length is returned instead of the copied length.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Per-channel ancillary data / timestamping */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1490
/* Handle one management command written to a control-style channel.
 *
 * The message is copied from user space, its header validated, the
 * command dispatched through the channel's handler table, and the
 * device reference (if an index was given) dropped again on exit.
 *
 * Returns the full message length on success, a negative errno on
 * local failure, or the result of mgmt_cmd_status() when a status
 * response was sent to the caller instead.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must describe exactly the payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Mirror the command to the monitor channel */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config, or claimed by a user
		 * channel, are not addressable via management commands.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices accept only handlers flagged for
		 * that state.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless the handler accepts either form, the presence of a
	 * device index must match the handler's HCI_MGMT_NO_HDEV flag.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Fixed-length commands must match exactly; variable-length
	 * ones must carry at least the fixed part.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1616
/* Forward a user-supplied logging frame to the monitor channel.
 *
 * Only opcode 0x0000 frames are accepted; the payload must carry a
 * priority byte, an ident length byte and NUL-terminated string data
 * (see the validation below).
 *
 * Returns the consumed length on success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The frame must at minimum hold the monitor header, the
	 * priority byte, the ident length byte and one NUL terminator
	 * byte.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* The header's length field must describe exactly the payload */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only priorities 0-7 are valid.
		 *
		 * The priority byte is followed by the ident length byte
		 * and the NUL-terminated ident string; verify that the
		 * frame ends in a NUL, that the declared ident length
		 * fits inside the payload, and that the ident string
		 * itself is NUL-terminated at the declared position.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		/* Anything but the user-logging opcode is rejected */
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* An explicit index must name an existing controller; the
	 * reference is only held across the send below.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

	/* NOTE: the success path intentionally falls through to drop;
	 * the skb is freed here in all cases (hci_send_to_channel() is
	 * presumed to deliver copies and not take ownership — confirm
	 * against its definition).
	 */
drop:
	kfree_skb(skb);
	return err;
}
1700
/* Transmit a frame written to an HCI socket.
 *
 * Dispatch depends on the bound channel: raw and user channel frames
 * are queued towards the controller, monitor sockets are read-only,
 * logging frames are forwarded via hci_logging_frame(), and any other
 * channel is treated as a management channel command.
 *
 * Returns the consumed length on success or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Everything else is a management channel command */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The first byte of the frame selects the HCI packet type */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here,
		 * store it in the skb for later use.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* OGF 0x3f (vendor specific) commands go via the
			 * raw queue instead of the command queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw channel data packets require CAP_NET_RAW and a
		 * valid data packet type.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1842
1843static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1844 sockptr_t optval, unsigned int len)
1845{
1846 struct hci_ufilter uf = { .opcode = 0 };
1847 struct sock *sk = sock->sk;
1848 int err = 0, opt = 0;
1849
1850 BT_DBG("sk %p, opt %d", sk, optname);
1851
1852 if (level != SOL_HCI)
1853 return -ENOPROTOOPT;
1854
1855 lock_sock(sk);
1856
1857 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1858 err = -EBADFD;
1859 goto done;
1860 }
1861
1862 switch (optname) {
1863 case HCI_DATA_DIR:
1864 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1865 err = -EFAULT;
1866 break;
1867 }
1868
1869 if (opt)
1870 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1871 else
1872 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1873 break;
1874
1875 case HCI_TIME_STAMP:
1876 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1877 err = -EFAULT;
1878 break;
1879 }
1880
1881 if (opt)
1882 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1883 else
1884 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1885 break;
1886
1887 case HCI_FILTER:
1888 {
1889 struct hci_filter *f = &hci_pi(sk)->filter;
1890
1891 uf.type_mask = f->type_mask;
1892 uf.opcode = f->opcode;
1893 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1894 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1895 }
1896
1897 len = min_t(unsigned int, len, sizeof(uf));
1898 if (copy_from_sockptr(&uf, optval, len)) {
1899 err = -EFAULT;
1900 break;
1901 }
1902
1903 if (!capable(CAP_NET_RAW)) {
1904 uf.type_mask &= hci_sec_filter.type_mask;
1905 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1906 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1907 }
1908
1909 {
1910 struct hci_filter *f = &hci_pi(sk)->filter;
1911
1912 f->type_mask = uf.type_mask;
1913 f->opcode = uf.opcode;
1914 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1915 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1916 }
1917 break;
1918
1919 default:
1920 err = -ENOPROTOOPT;
1921 break;
1922 }
1923
1924done:
1925 release_sock(sk);
1926 return err;
1927}
1928
1929static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1930 char __user *optval, int __user *optlen)
1931{
1932 struct hci_ufilter uf;
1933 struct sock *sk = sock->sk;
1934 int len, opt, err = 0;
1935
1936 BT_DBG("sk %p, opt %d", sk, optname);
1937
1938 if (level != SOL_HCI)
1939 return -ENOPROTOOPT;
1940
1941 if (get_user(len, optlen))
1942 return -EFAULT;
1943
1944 lock_sock(sk);
1945
1946 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1947 err = -EBADFD;
1948 goto done;
1949 }
1950
1951 switch (optname) {
1952 case HCI_DATA_DIR:
1953 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1954 opt = 1;
1955 else
1956 opt = 0;
1957
1958 if (put_user(opt, optval))
1959 err = -EFAULT;
1960 break;
1961
1962 case HCI_TIME_STAMP:
1963 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1964 opt = 1;
1965 else
1966 opt = 0;
1967
1968 if (put_user(opt, optval))
1969 err = -EFAULT;
1970 break;
1971
1972 case HCI_FILTER:
1973 {
1974 struct hci_filter *f = &hci_pi(sk)->filter;
1975
1976 memset(&uf, 0, sizeof(uf));
1977 uf.type_mask = f->type_mask;
1978 uf.opcode = f->opcode;
1979 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1980 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1981 }
1982
1983 len = min_t(unsigned int, len, sizeof(uf));
1984 if (copy_to_user(optval, &uf, len))
1985 err = -EFAULT;
1986 break;
1987
1988 default:
1989 err = -ENOPROTOOPT;
1990 break;
1991 }
1992
1993done:
1994 release_sock(sk);
1995 return err;
1996}
1997
/* Socket operations for PF_BLUETOOTH HCI sockets; unsupported
 * datagram operations are stubbed with the generic sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
2020
/* Protocol definition; obj_size makes sk_alloc() carve out the
 * hci_pinfo that hci_pi() casts the sock to.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
2026
/* Create a new HCI socket.
 *
 * Only SOCK_RAW is supported. The new sock starts out unbound
 * (BT_OPEN) and is added to the global HCI socket list.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	/* Register in the list used by the procfs code in
	 * hci_sock_init() below.
	 */
	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
2055
/* Family handler registered for BTPROTO_HCI in hci_sock_init() */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
2061
/* Register the HCI socket protocol, family handler and procfs entry.
 *
 * Returns 0 on success or a negative errno; partially completed
 * registrations are unwound in reverse order on failure.
 */
int __init hci_sock_init(void)
{
	int err;

	/* struct sockaddr_hci must fit into a generic struct sockaddr */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Unwind the socket registration before the common path
		 * unregisters the protocol.
		 */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2093
/* Tear down everything hci_sock_init() registered, in reverse order */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
2100