1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/jiffies.h>
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
40#include <linux/workqueue.h>
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
43#include <linux/rfkill.h>
44#include <net/sock.h>
45
46#include <asm/system.h>
47#include <linux/uaccess.h>
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
/* Tasklet handlers and notifier helper, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Serializes the RX/TX/command tasklets against protocol
 * registration and unregistration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* Upper-layer HCI protocols, indexed by hp->id (bounded below). */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* Notifier chain for HCI_DEV_* device events. */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75
76
/* Register @nb on the HCI notifier chain to receive HCI_DEV_* events. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the HCI notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (an HCI_DEV_* value) for @hdev to all registered
 * notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92
93
/* Complete the pending synchronous request, waking the task sleeping
 * in __hci_request(). When the request registered a final command in
 * hdev->req_last_cmd, completions of earlier commands are ignored so
 * only the last command (or an unfiltered request) ends the wait. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* Ignore intermediate command completions; wait for the
	 * registered final command. */
	if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
111
/* Abort the pending synchronous request with @err (a positive errno);
 * __hci_request() will return -@err to its caller. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
122
123
/* Execute request callback @req and sleep until hci_req_complete() or
 * hci_req_cancel() wakes us, or @timeout (jiffies) expires.
 * Callers serialize via hci_req_lock(). Returns 0 on success, an errno
 * mapped from the controller status, the cancellation error, -ETIMEDOUT,
 * or -EINTR when interrupted by a signal. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves and go interruptible BEFORE issuing the
	 * request, so the completion wake-up cannot be lost between
	 * req() and schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return without clearing
	 * req_status/req_last_cmd (done below on all other paths) —
	 * confirm callers tolerate the stale request state. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
165
166static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167 unsigned long opt, __u32 timeout)
168{
169 int ret;
170
171 if (!test_bit(HCI_UP, &hdev->flags))
172 return -ENETDOWN;
173
174
175 hci_req_lock(hdev);
176 ret = __hci_request(hdev, req, opt, timeout);
177 hci_req_unlock(hdev);
178
179 return ret;
180}
181
/* Request callback: issue an HCI Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
190static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
191{
192 struct sk_buff *skb;
193 __le16 param;
194 __u8 flt_type;
195
196 BT_DBG("%s %ld", hdev->name, opt);
197
198
199
200
201 while ((skb = skb_dequeue(&hdev->driver_init))) {
202 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
203 skb->dev = (void *) hdev;
204
205 skb_queue_tail(&hdev->cmd_q, skb);
206 tasklet_schedule(&hdev->cmd_task);
207 }
208 skb_queue_purge(&hdev->driver_init);
209
210
211
212
213 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
214 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
215
216
217 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
218
219
220 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
221
222
223 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
224
225#if 0
226
227 {
228 struct hci_cp_host_buffer_size cp;
229 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
230 cp.sco_mtu = HCI_MAX_SCO_SIZE;
231 cp.acl_max_pkt = cpu_to_le16(0xffff);
232 cp.sco_max_pkt = cpu_to_le16(0xffff);
233 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
234 }
235#endif
236
237
238 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
239
240
241 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
242
243
244 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
245
246
247 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
248
249
250
251
252 flt_type = HCI_FLT_CLEAR_ALL;
253 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
254
255
256 param = cpu_to_le16(0x8000);
257 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m);
258
259
260 param = cpu_to_le16(0x7d00);
261 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
262
263 hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
264}
265
/* Request callback: write the scan-enable setting carried in @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
275
/* Request callback: write the authentication-enable setting in @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
285
/* Request callback: write the encryption-mode setting carried in @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
295
/* Request callback: write the default link policy carried in @opt
 * (converted to little-endian wire format). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
305
306
307
/* Look up an HCI device by id. On success returns the device with an
 * elevated reference count (caller must hci_dev_put()); returns NULL
 * when @index is negative or no such device is registered. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			/* Take the reference while the list lock pins
			 * the entry. */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
329
330
331static void inquiry_cache_flush(struct hci_dev *hdev)
332{
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
335
336 BT_DBG("cache %p", cache);
337
338 cache->list = NULL;
339 while ((e = next)) {
340 next = e->next;
341 kfree(e);
342 }
343}
344
345struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346{
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
349
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
354 break;
355 return e;
356}
357
/* Insert or refresh the inquiry cache entry for data->bdaddr.
 * A new entry is pushed onto the list head (GFP_ATOMIC — this runs
 * from event-processing context); an existing entry is overwritten in
 * place. Allocation failure is silently ignored: the result is just a
 * cache miss later. Both entry and cache timestamps are refreshed. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
380
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. @buf must
 * hold at least @num entries; callers serialize via the device lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
402
/* Request callback: start an inquiry with the parameters carried in
 * the struct hci_inquiry_req pointed to by @opt. Does nothing when an
 * inquiry is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
419
/* HCIINQUIRY ioctl handler. Copies a struct hci_inquiry_req from
 * userspace, runs a fresh inquiry when the cache is stale/empty or a
 * flush was requested, then copies the (possibly cached) results back:
 * first the updated request header, then the inquiry_info array.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	/* Decide under the device lock whether the cache can serve this
	 * request or a new inquiry is needed. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units on the wire; ~2s per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* num_rsp == 0 means "unlimited"; cap the dump buffer at 255. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate the buffer first
	 * and dump the results under the lock afterwards. */
	buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
484
485
486
/* Open and initialize HCI device @dev.
 * Fails with -ERFKILL when rfkill-blocked and -EALREADY when already
 * up. Raw/non-BR-EDR devices skip controller initialization. On init
 * failure the device is fully unwound (tasklets killed, queues purged,
 * transport closed). Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		/* Run the controller bring-up sequence synchronously. */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
562
/* Bring the device down and release its transport.
 * Cancels any pending synchronous request, kills the RX/TX tasklets,
 * flushes caches/connections, optionally resets the controller, then
 * kills the command tasklet, drops all queues, closes the transport
 * and drops the reference taken in hci_dev_open(). Idempotent: returns
 * 0 immediately when the device is already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task (after the reset above has been issued). */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
625
626int hci_dev_close(__u16 dev)
627{
628 struct hci_dev *hdev;
629 int err;
630
631 hdev = hci_dev_get(dev);
632 if (!hdev)
633 return -ENODEV;
634 err = hci_dev_do_close(hdev);
635 hci_dev_put(hdev);
636 return err;
637}
638
/* HCIDEVRESET ioctl backend: reset a running device in place.
 * With TX disabled, drops all pending RX/command traffic, flushes the
 * inquiry cache and connections, resets the packet counters and (for
 * non-raw devices) issues an HCI Reset. A device that is not up is
 * silently a no-op (returns 0). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet from touching the queues while we purge. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
679
680int hci_dev_reset_stat(__u16 dev)
681{
682 struct hci_dev *hdev;
683 int ret = 0;
684
685 hdev = hci_dev_get(dev);
686 if (!hdev)
687 return -ENODEV;
688
689 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
690
691 hci_dev_put(hdev);
692
693 return ret;
694}
695
/* Dispatch the device-configuration ioctls (HCISET*).
 * Copies a struct hci_dev_req from userspace, then either runs the
 * matching synchronous HCI request (auth/encrypt/scan/link policy) or
 * updates the corresponding hdev field directly. Returns 0 or a
 * negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs (mtu << 16) | pkts; split the halves. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
770
/* HCIGETDEVLIST ioctl backend.
 * Reads the requested entry count from userspace, fills a kernel
 * buffer with (id, flags) pairs for up to that many registered
 * devices, and copies the trimmed result back. Returns 0 or a
 * negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; also rejects dev_num == 0. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
812
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot
 * for the device named in the request and copy it to userspace.
 * Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* hdev->name is "hciN" (set in hci_register_dev), so this
	 * strcpy stays within di.name. */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
848
849
850
851static int hci_rfkill_set_block(void *data, bool blocked)
852{
853 struct hci_dev *hdev = data;
854
855 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
856
857 if (!blocked)
858 return 0;
859
860 hci_dev_do_close(hdev);
861
862 return 0;
863}
864
/* rfkill operations for HCI devices (block => close the device). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
868
869
/* Alloc HCI device: zeroed struct hci_dev with the driver_init queue
 * initialized. Everything else is set up in hci_register_dev().
 * Returns NULL on allocation failure; free with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
883
884
/* Free HCI device: purge any driver-queued init commands and drop the
 * embedded device reference (the release callback frees hdev). */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
893
894
/* Register an HCI device with the transport system.
 * Allocates the lowest free id (device is named "hci<id>"), fully
 * initializes queues, tasklets, locks and per-device state, creates
 * the workqueue and sysfs entries, registers an rfkill switch (best
 * effort) and announces the device. Returns the new id, -EINVAL when
 * mandatory callbacks are missing, or -ENOMEM. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by
	 * id, so the first gap (or the end) is the new id. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill registration is best effort: the device works without
	 * a switch if either step fails. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	mgmt_index_added(hdev->id);
	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Unwind the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
985
986
/* Unregister an HCI device: remove it from the device list, close it,
 * free reassembly buffers, announce removal, tear down rfkill/sysfs/
 * workqueue, clear the blacklist and drop the registration reference.
 * Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	mgmt_index_removed(hdev->id);
	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1023
1024
/* Suspend HCI device: notify listeners only; no state is changed here. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1031
1032
/* Resume HCI device: notify listeners only; no state is changed here. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1039
1040
/* Receive frame from HCI drivers. Takes ownership of @skb (frees it
 * on error), timestamps it, queues it on the RX queue and kicks the
 * RX tasklet. Frames are only accepted while the device is up or in
 * init; otherwise returns -ENXIO. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1063
/* Incremental packet reassembler.
 * Consumes up to @count bytes of @data into the partial skb stored in
 * hdev->reassembly[@index], allocating a fresh skb (sized for @type)
 * when none is pending. scb->expect tracks how many bytes are still
 * needed; once the header is complete it is re-read from the packet's
 * own length field. A completed packet is handed to hci_recv_frame().
 * Returns the number of unconsumed bytes, or a negative errno
 * (-EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload exceeds the skb's tailroom). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* NOTE(review): the range check relies on the HCI_*_PKT values
	 * being numerically ordered with ACLDATA lowest and EVENT
	 * highest — confirm against the header definitions. */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
			index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new packet: size the skb for the worst case
		 * of this packet type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has arrived, read the real
		 * payload length from it and sanity-check tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame — hand it off (hci_recv_frame
			 * takes ownership of the skb). */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1172
1173int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1174{
1175 int rem = 0;
1176
1177 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1178 return -EILSEQ;
1179
1180 while (count) {
1181 rem = hci_reassembly(hdev, type, data, count,
1182 type - 1, GFP_ATOMIC);
1183 if (rem < 0)
1184 return rem;
1185
1186 data += (count - rem);
1187 count = rem;
1188 };
1189
1190 return rem;
1191}
1192EXPORT_SYMBOL(hci_recv_fragment);
1193
/* Single reassembly slot for stream transports (e.g. UART), where the
 * packet type byte is in-band at the start of each packet. */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte-stream fragment into the reassembler.
 * When no packet is pending, the first byte of @data is consumed as
 * the packet-type indicator; otherwise the pending packet's type is
 * reused. Returns the leftover byte count or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: pull out the type byte. */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1228
1229
1230
1231
1232
1233int hci_register_proto(struct hci_proto *hp)
1234{
1235 int err = 0;
1236
1237 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1238
1239 if (hp->id >= HCI_MAX_PROTO)
1240 return -EINVAL;
1241
1242 write_lock_bh(&hci_task_lock);
1243
1244 if (!hci_proto[hp->id])
1245 hci_proto[hp->id] = hp;
1246 else
1247 err = -EEXIST;
1248
1249 write_unlock_bh(&hci_task_lock);
1250
1251 return err;
1252}
1253EXPORT_SYMBOL(hci_register_proto);
1254
1255int hci_unregister_proto(struct hci_proto *hp)
1256{
1257 int err = 0;
1258
1259 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1260
1261 if (hp->id >= HCI_MAX_PROTO)
1262 return -EINVAL;
1263
1264 write_lock_bh(&hci_task_lock);
1265
1266 if (hci_proto[hp->id])
1267 hci_proto[hp->id] = NULL;
1268 else
1269 err = -ENOENT;
1270
1271 write_unlock_bh(&hci_task_lock);
1272
1273 return err;
1274}
1275EXPORT_SYMBOL(hci_unregister_proto);
1276
/* Add @cb to the global HCI callback list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1288
/* Remove @cb from the global HCI callback list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1300
/* Hand @skb to the driver for transmission. Takes ownership of the
 * skb. When a promiscuous listener exists, the frame is timestamped
 * and mirrored to monitoring sockets first. Returns the driver's send
 * result, or -ENODEV when the skb carries no device. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Copy to monitoring sockets. */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1324
1325
/* Send HCI command: build a command packet with @opcode and @plen
 * bytes of @param, queue it on the command queue and kick the command
 * tasklet. Returns 0 or -ENOMEM. GFP_ATOMIC because this is called
 * from event/tasklet context. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1357
1358
1359void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1360{
1361 struct hci_command_hdr *hdr;
1362
1363 if (!hdev->sent_cmd)
1364 return NULL;
1365
1366 hdr = (void *) hdev->sent_cmd->data;
1367
1368 if (hdr->opcode != cpu_to_le16(opcode))
1369 return NULL;
1370
1371 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1372
1373 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1374}
1375
1376
/* Prepend an ACL header (packed handle/flags plus the payload length,
 * both little-endian) to @skb. The skb must have HCI_ACL_HDR_SIZE
 * bytes of headroom. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1388
/* Send ACL data on @conn. The head skb gets an ACL_START header; any
 * skbs chained on its frag_list get ACL_CONT headers and are queued
 * as one atomic run under the queue lock so fragments of different
 * packets cannot interleave. The TX tasklet is then kicked. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1434
1435
/* Send SCO data on @conn: prepend the SCO header (handle little-
 * endian, dlen as-is), queue on the connection's data queue and kick
 * the TX tasklet. @skb must have HCI_SCO_HDR_SIZE of headroom. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1457
1458
1459
1460
/* Pick the connection of @type that has data queued and the fewest
 * packets in flight, and compute its fair share of the controller's
 * free buffers in *quote (never less than 1).  Returns NULL with
 * *quote = 0 when no connection is ready to send.
 * NOTE(review): assumes the connection hash is stable while walked —
 * presumably serialized by the callers' tasklet context; verify. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	/* min = ~0 is all-ones: every unsigned 'sent' count compares
	 * below it, so the first eligible connection is always taken. */
	int num = 0, min = ~0;
	struct list_head *p;

	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Wrong link type or nothing queued: not a candidate. */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least outstanding data. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the free controller buffers evenly among the
		 * ready connections; guarantee progress with at least 1. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1498
1499static inline void hci_acl_tx_to(struct hci_dev *hdev)
1500{
1501 struct hci_conn_hash *h = &hdev->conn_hash;
1502 struct list_head *p;
1503 struct hci_conn *c;
1504
1505 BT_ERR("%s ACL tx timeout", hdev->name);
1506
1507
1508 list_for_each(p, &h->list) {
1509 c = list_entry(p, struct hci_conn, list);
1510 if (c->type == ACL_LINK && c->sent) {
1511 BT_ERR("%s killing stalled ACL connection %s",
1512 hdev->name, batostr(&c->dst));
1513 hci_acl_disconn(c, 0x13);
1514 }
1515 }
1516}
1517
1518static inline void hci_sched_acl(struct hci_dev *hdev)
1519{
1520 struct hci_conn *conn;
1521 struct sk_buff *skb;
1522 int quote;
1523
1524 BT_DBG("%s", hdev->name);
1525
1526 if (!test_bit(HCI_RAW, &hdev->flags)) {
1527
1528
1529 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1530 hci_acl_tx_to(hdev);
1531 }
1532
1533 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
1534 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1535 BT_DBG("skb %p len %d", skb, skb->len);
1536
1537 hci_conn_enter_active_mode(conn);
1538
1539 hci_send_frame(skb);
1540 hdev->acl_last_tx = jiffies;
1541
1542 hdev->acl_cnt--;
1543 conn->sent++;
1544 }
1545 }
1546}
1547
1548
1549static inline void hci_sched_sco(struct hci_dev *hdev)
1550{
1551 struct hci_conn *conn;
1552 struct sk_buff *skb;
1553 int quote;
1554
1555 BT_DBG("%s", hdev->name);
1556
1557 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
1558 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1559 BT_DBG("skb %p len %d", skb, skb->len);
1560 hci_send_frame(skb);
1561
1562 conn->sent++;
1563 if (conn->sent == ~0)
1564 conn->sent = 0;
1565 }
1566 }
1567}
1568
1569static inline void hci_sched_esco(struct hci_dev *hdev)
1570{
1571 struct hci_conn *conn;
1572 struct sk_buff *skb;
1573 int quote;
1574
1575 BT_DBG("%s", hdev->name);
1576
1577 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
1578 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1579 BT_DBG("skb %p len %d", skb, skb->len);
1580 hci_send_frame(skb);
1581
1582 conn->sent++;
1583 if (conn->sent == ~0)
1584 conn->sent = 0;
1585 }
1586 }
1587}
1588
1589static void hci_tx_task(unsigned long arg)
1590{
1591 struct hci_dev *hdev = (struct hci_dev *) arg;
1592 struct sk_buff *skb;
1593
1594 read_lock(&hci_task_lock);
1595
1596 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1597
1598
1599
1600 hci_sched_acl(hdev);
1601
1602 hci_sched_sco(hdev);
1603
1604 hci_sched_esco(hdev);
1605
1606
1607 while ((skb = skb_dequeue(&hdev->raw_q)))
1608 hci_send_frame(skb);
1609
1610 read_unlock(&hci_task_lock);
1611}
1612
1613
1614
1615
1616static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1617{
1618 struct hci_acl_hdr *hdr = (void *) skb->data;
1619 struct hci_conn *conn;
1620 __u16 handle, flags;
1621
1622 skb_pull(skb, HCI_ACL_HDR_SIZE);
1623
1624 handle = __le16_to_cpu(hdr->handle);
1625 flags = hci_flags(handle);
1626 handle = hci_handle(handle);
1627
1628 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1629
1630 hdev->stat.acl_rx++;
1631
1632 hci_dev_lock(hdev);
1633 conn = hci_conn_hash_lookup_handle(hdev, handle);
1634 hci_dev_unlock(hdev);
1635
1636 if (conn) {
1637 register struct hci_proto *hp;
1638
1639 hci_conn_enter_active_mode(conn);
1640
1641
1642 hp = hci_proto[HCI_PROTO_L2CAP];
1643 if (hp && hp->recv_acldata) {
1644 hp->recv_acldata(conn, skb, flags);
1645 return;
1646 }
1647 } else {
1648 BT_ERR("%s ACL packet for unknown connection handle %d",
1649 hdev->name, handle);
1650 }
1651
1652 kfree_skb(skb);
1653}
1654
1655
1656static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1657{
1658 struct hci_sco_hdr *hdr = (void *) skb->data;
1659 struct hci_conn *conn;
1660 __u16 handle;
1661
1662 skb_pull(skb, HCI_SCO_HDR_SIZE);
1663
1664 handle = __le16_to_cpu(hdr->handle);
1665
1666 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1667
1668 hdev->stat.sco_rx++;
1669
1670 hci_dev_lock(hdev);
1671 conn = hci_conn_hash_lookup_handle(hdev, handle);
1672 hci_dev_unlock(hdev);
1673
1674 if (conn) {
1675 register struct hci_proto *hp;
1676
1677
1678 hp = hci_proto[HCI_PROTO_SCO];
1679 if (hp && hp->recv_scodata) {
1680 hp->recv_scodata(conn, skb);
1681 return;
1682 }
1683 } else {
1684 BT_ERR("%s SCO packet for unknown connection handle %d",
1685 hdev->name, handle);
1686 }
1687
1688 kfree_skb(skb);
1689}
1690
/* RX tasklet entry point: drain hdev->rx_q and dispatch each packet
 * by type.  Ordering matters: promiscuous listeners see every packet
 * (even in raw mode) before any filtering happens. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			/* Raw mode: the stack does not process frames. */
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* While initializing, only events are processed;
			 * data packets are dropped. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame — each handler consumes the skb. */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1745
/* Command tasklet: send the next queued HCI command when the
 * controller has room for one (cmd_cnt > 0). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* If no command completed within a second, assume the completion
	 * event was lost and re-allow one outstanding command. */
	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		/* Drop the clone kept for the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the command in flight — presumably so
		 * the completion path can inspect its parameters; the
		 * clone lives until the next command is sent. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue the command and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
1773