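/* Bluetooth HCI core. */
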
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

static ATOMIC_NOTIFIER_HEAD(hci_notifier);

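/* ---- HCI notifications ---- */
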
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

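/* ---- HCI requests ---- */
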
void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

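/* Execute request and wait for completion. */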
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

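	/* Serialize all requests */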
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

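	/* Driver initialization */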
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

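	/* Mandatory initialization */

	/* Reset */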
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
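	/* Host buffer size */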
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

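	/* Optional initialization */

	/* Clear Event Filters */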
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

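	/* Page timeout ~20 secs */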
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

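	/* Connection accept timeout ~20 secs */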
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

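/* Get HCI device by index.
 * Device is held on return. */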
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

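/* ---- Inquiry support (caller must hold hdev lock) ---- */
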
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

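	/* For an unlimited number of responses use a buffer with 255 entries */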
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

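	/* cache_dump can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards */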
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

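/* ---- HCI ioctl helpers ---- */
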
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
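		/* Init failed, clean up */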
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

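	/* Kill RX and TX tasks */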
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

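	/* Reset device */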
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

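	/* After this point our queues are empty
	 * and no tasks are scheduled. */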
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = hdev->type;
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

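/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */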
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

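/* Free HCI device */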
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

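/* Register HCI device */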
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

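	/* Find first available device id */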
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

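/* Unregister HCI device */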
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

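/* Suspend HCI device */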
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

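/* Resume HCI device */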
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

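/* Receive and reassemble packet fragments from the HCI driver */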
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
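			/* Start of the frame */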
			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
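			/* Continuation fragment */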
			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
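			/* Complete frame */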
			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

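/* ---- Interface to upper protocols ---- */
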
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

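	/* Get rid of skb owner, prior to sending to the driver. */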
	skb_orphan(skb);

	return hdev->send(skb);
}

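/* Send HCI command */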
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

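/* Get data from the previously sent command */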
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

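/* Send ACL data */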
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

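		/* Queue all fragments atomically */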
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

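/* Send SCO data */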
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

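/* ---- HCI TX task (outgoing data) ---- */

/* HCI connection scheduler */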
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

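	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled. */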
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

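	/* Kill stalled connections */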
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
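		/* ACL tx timeout must be longer than the maximum
		 * link supervision timeout (40.9 seconds) */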
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

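/* ---- HCI RX task (incoming data processing) ---- */

/* ACL data packet */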
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

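		/* Send to upper protocol */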
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

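/* SCO data packet */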
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

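		/* Send to upper protocol */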
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
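			/* Don't process data packets in this state */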
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

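		/* Process frame */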
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

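	/* Send queued commands */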
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}