/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
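
/*
 * Illustrative sketch (not part of this file): a hypothetical module can
 * watch device events through this notifier chain. The callback name and
 * body are assumptions for the example.
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			BT_DBG("device %s registered", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *	hci_register_notifier(&my_nb);
 */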

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
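
/*
 * Illustrative sketch (not part of this file): the request helpers above
 * pair a command-sending callback with a synchronous wait. The HCISETSCAN
 * ioctl below effectively does:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_scan_req() queues the Write Scan Enable command, and hci_request()
 * sleeps until hci_req_complete() is called from the event path or the
 * timeout expires.
 */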

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
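
/*
 * Illustrative sketch (not part of this file): every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller is
 * done with the device, as the ioctl helpers below do:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	// ... use hdev ...
 *	hci_dev_put(hdev);
 */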

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
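
/*
 * Illustrative sketch (not part of this file): from user space this path
 * is reached through the HCIINQUIRY ioctl on a raw HCI socket, roughly:
 *
 *	struct hci_inquiry_req *ir = ...;  // dev_id, lap, length, num_rsp
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(sk, HCIINQUIRY, (unsigned long) ir);
 *
 * The buffer behind ir must leave room for the returned inquiry_info
 * records; the exact marshalling is handled in hci_sock.c.
 */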

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev->id, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev->id, bdaddr);
}

static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
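
/*
 * Illustrative sketch (not part of this file): a transport driver
 * typically allocates a device, fills in its callbacks, and registers.
 * The callback names are assumptions for the example:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	hdev->bus = HCI_USB;            // or HCI_UART, HCI_SDIO, ...
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */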

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
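
/*
 * Illustrative sketch (not part of this file): a driver whose transport
 * delivers packets piecewise (e.g. chunks from a bus FIFO) can feed each
 * chunk here; the buffer names are assumptions for the example:
 *
 *	// in the driver's receive path, for a known packet type
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, chunk, chunk_len);
 *	if (err < 0)
 *		BT_ERR("reassembly failed");
 *
 * hci_reassembly() accumulates bytes until the header-announced length
 * is complete, then hands the finished skb to hci_recv_frame().
 */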

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
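
/*
 * Illustrative sketch (not part of this file): callers build a parameter
 * struct and queue the command; hci_inq_req() above does exactly this:
 *
 *	struct hci_cp_inquiry cp;
 *	memcpy(&cp.lap, lap, 3);   // "lap" is an assumed local buffer
 *	cp.length = 8;
 *	cp.num_rsp = 0;
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *
 * The command is serialized by the cmd task, which respects the
 * controller's outstanding-command credit (hdev->cmd_cnt).
 */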

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
								int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
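
/*
 * Worked example (illustrative): with 9 free ACL slots (cnt = 9) shared
 * by 3 busy ACL connections (num = 3), each scheduling round grants the
 * least-recently-served connection q = 9 / 3 = 3 packets; when cnt / num
 * rounds down to 0, the quote is clamped to 1 so progress is guaranteed.
 */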

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
					hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}