1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef __HCI_CORE_H
26#define __HCI_CORE_H
27
28#include <net/bluetooth/hci.h>
29
30
/* Indices into the global hci_proto[] protocol handler table. */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
33
34
/* Information about a remote device collected during inquiry (discovery). */
struct inquiry_data {
	bdaddr_t	bdaddr;			/* remote device address */
	__u8		pscan_rep_mode;		/* page scan repetition mode */
	__u8		pscan_period_mode;	/* page scan period mode */
	__u8		pscan_mode;		/* page scan mode */
	__u8		dev_class[3];		/* class of device */
	__le16		clock_offset;		/* clock offset, wire (little-endian) order */
	__s8		rssi;			/* signal strength, signed dBm-style value */
	__u8		ssp_mode;		/* simple pairing mode */
};
45
/* One node of the singly linked inquiry cache list. */
struct inquiry_entry {
	struct inquiry_entry	*next;		/* next entry, NULL-terminated list */
	__u32			timestamp;	/* jiffies when entry was (last) updated */
	struct inquiry_data	data;
};
51
/* Per-device cache of inquiry results; protected by its own spinlock. */
struct inquiry_cache {
	spinlock_t		lock;		/* protects list */
	__u32			timestamp;	/* jiffies of last cache update */
	struct inquiry_entry	*list;		/* head of entry list, NULL when empty */
};
57
/* Set of active connections of a device, with per-type counters. */
struct hci_conn_hash {
	struct list_head	list;		/* list of struct hci_conn */
	spinlock_t		lock;		/* protects list and counters */
	unsigned int		acl_num;	/* number of ACL links */
	unsigned int		sco_num;	/* number of SCO/eSCO links */
};
64
/* State of one HCI controller (Bluetooth adapter).
 * Reference counted via refcnt; released through the driver-supplied
 * destruct() callback (see __hci_dev_put below). */
struct hci_dev {
	struct list_head list;			/* node in global hci_dev_list */
	spinlock_t	lock;			/* protects device state */
	atomic_t	refcnt;			/* see __hci_dev_hold/__hci_dev_put */

	char		name[8];		/* device name, e.g. "hci0" */
	unsigned long	flags;			/* HCI_UP, HCI_INIT, ... bit flags */
	__u16		id;			/* device index */
	__u8		type;			/* transport/bus type */
	bdaddr_t	bdaddr;			/* local device address */
	__u8		dev_name[248];		/* local (friendly) device name */
	__u8		dev_class[3];		/* class of device */
	__u8		features[8];		/* LMP features bitmap, see lmp_* macros */
	__u8		commands[64];		/* supported HCI commands bitmap */
	__u8		ssp_mode;		/* simple pairing mode */
	__u8		hci_ver;		/* HCI version */
	__u16		hci_rev;		/* HCI revision */
	__u16		manufacturer;		/* manufacturer id */
	__u16		voice_setting;

	__u16		pkt_type;		/* supported ACL/SCO packet types */
	__u16		esco_type;		/* supported eSCO packet types */
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;		/* idle timeout before sniff, see hci_conn timers */
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;			/* driver quirk flags */

	/* Flow control: outstanding command credit and per-type
	 * packet counters against the controller buffer limits. */
	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;		/* max ACL payload */
	unsigned int	sco_mtu;		/* max SCO payload */
	unsigned int	acl_pkts;		/* controller ACL buffer count */
	unsigned int	sco_pkts;		/* controller SCO buffer count */

	unsigned long	cmd_last_tx;		/* jiffies of last command sent */
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	/* Deferred work: scheduled via hci_sched_{cmd,rx,tx} below. */
	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;		/* received frames, see hci_recv_frame */
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;		/* queued HCI commands */

	struct sk_buff		*sent_cmd;	/* last command sent, see hci_sent_cmd_data */
	struct sk_buff		*reassembly[3];	/* partial frames, see hci_recv_fragment */

	/* Synchronous HCI request state, see hci_req_lock/HCI_REQ_*. */
	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;	/* discovery results */
	struct hci_conn_hash	conn_hash;	/* active connections */

	struct hci_dev_stats	stat;		/* traffic statistics */

	struct sk_buff_head	driver_init;	/* frames the driver sends at init */

	void			*driver_data;	/* owned by the transport driver */
	void			*core_data;	/* owned by the HCI core */

	atomic_t		promisc;	/* raw socket listeners */

	struct device		*parent;	/* set via SET_HCIDEV_DEV */
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;		/* driver module, pinned in hci_dev_hold */

	/* Driver callbacks; destruct frees the device on last put. */
	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
152
/* One ACL or SCO connection to a remote device.
 * Reference counted via refcnt (hci_conn_hold/hci_conn_put); the device
 * reference devref is managed by hci_conn_hold_device/hci_conn_put_device. */
struct hci_conn {
	struct list_head list;		/* node in hdev->conn_hash.list */

	atomic_t	refcnt;		/* see hci_conn_hold/hci_conn_put */
	spinlock_t	lock;

	bdaddr_t	dst;		/* remote device address */
	__u16		handle;		/* connection handle assigned by controller */
	__u16		state;		/* BT_CONNECTED, ... */
	__u8		mode;		/* active/sniff mode */
	__u8		type;		/* ACL_LINK or SCO link type */
	__u8		out;		/* non-zero for outgoing connections */
	__u8		attempt;	/* connection attempt counter */
	__u8		dev_class[3];	/* remote class of device */
	__u8		features[8];	/* remote LMP features */
	__u8		ssp_mode;	/* remote simple pairing mode */
	__u16		interval;
	__u16		pkt_type;
	__u16		link_policy;
	__u32		link_mode;	/* HCI_LM_* bits, e.g. HCI_LM_ENCRYPT */
	__u8		auth_type;	/* requested authentication requirements */
	__u8		sec_level;	/* current security level (BT_SECURITY_*) */
	__u8		power_save;
	__u16		disc_timeout;	/* disconnect delay in ms, see hci_conn_put */
	unsigned long	pend;		/* HCI_CONN_*_PEND bit flags */

	unsigned int	sent;		/* packets sent but not yet acked */

	struct sk_buff_head data_q;	/* outgoing data queue */

	struct timer_list disc_timer;	/* delayed disconnect, armed in hci_conn_put */
	struct timer_list idle_timer;	/* idle timeout (ACL only) */

	struct work_struct work_add;	/* deferred sysfs add */
	struct work_struct work_del;	/* deferred sysfs del */

	struct device	dev;
	atomic_t	devref;		/* see hci_conn_hold_device/hci_conn_put_device */

	struct hci_dev	*hdev;		/* owning device */
	void		*l2cap_data;	/* owned by the L2CAP layer */
	void		*sco_data;	/* owned by the SCO layer */
	void		*priv;

	struct hci_conn	*link;		/* e.g. SCO link paired with its ACL link */
};
199
200extern struct hci_proto *hci_proto[];
201extern struct list_head hci_dev_list;
202extern struct list_head hci_cb_list;
203extern rwlock_t hci_dev_list_lock;
204extern rwlock_t hci_cb_list_lock;
205
206
207#define INQUIRY_CACHE_AGE_MAX (HZ*30)
208#define INQUIRY_ENTRY_AGE_MAX (HZ*60)
209
210#define inquiry_cache_lock(c) spin_lock(&c->lock)
211#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
212#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
213#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
214
215static inline void inquiry_cache_init(struct hci_dev *hdev)
216{
217 struct inquiry_cache *c = &hdev->inq_cache;
218 spin_lock_init(&c->lock);
219 c->list = NULL;
220}
221
222static inline int inquiry_cache_empty(struct hci_dev *hdev)
223{
224 struct inquiry_cache *c = &hdev->inq_cache;
225 return (c->list == NULL);
226}
227
228static inline long inquiry_cache_age(struct hci_dev *hdev)
229{
230 struct inquiry_cache *c = &hdev->inq_cache;
231 return jiffies - c->timestamp;
232}
233
/* Age of one inquiry cache entry, in jiffies. */
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
238
/* Find a cache entry by address / insert or refresh an entry from new
 * inquiry data (implemented in hci_core.c). */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
241
242
/* Bit numbers for hci_conn->pend: operations awaiting completion. */
enum {
	HCI_CONN_AUTH_PEND,		/* authentication pending */
	HCI_CONN_ENCRYPT_PEND,		/* encryption change pending */
	HCI_CONN_RSWITCH_PEND,		/* role switch pending */
	HCI_CONN_MODE_CHANGE_PEND,	/* mode change pending */
};
249
250static inline void hci_conn_hash_init(struct hci_dev *hdev)
251{
252 struct hci_conn_hash *h = &hdev->conn_hash;
253 INIT_LIST_HEAD(&h->list);
254 spin_lock_init(&h->lock);
255 h->acl_num = 0;
256 h->sco_num = 0;
257}
258
259static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
260{
261 struct hci_conn_hash *h = &hdev->conn_hash;
262 list_add(&c->list, &h->list);
263 if (c->type == ACL_LINK)
264 h->acl_num++;
265 else
266 h->sco_num++;
267}
268
269static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
270{
271 struct hci_conn_hash *h = &hdev->conn_hash;
272 list_del(&c->list);
273 if (c->type == ACL_LINK)
274 h->acl_num--;
275 else
276 h->sco_num--;
277}
278
279static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
280 __u16 handle)
281{
282 struct hci_conn_hash *h = &hdev->conn_hash;
283 struct list_head *p;
284 struct hci_conn *c;
285
286 list_for_each(p, &h->list) {
287 c = list_entry(p, struct hci_conn, list);
288 if (c->handle == handle)
289 return c;
290 }
291 return NULL;
292}
293
294static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
295 __u8 type, bdaddr_t *ba)
296{
297 struct hci_conn_hash *h = &hdev->conn_hash;
298 struct list_head *p;
299 struct hci_conn *c;
300
301 list_for_each(p, &h->list) {
302 c = list_entry(p, struct hci_conn, list);
303 if (c->type == type && !bacmp(&c->dst, ba))
304 return c;
305 }
306 return NULL;
307}
308
309static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
310 __u8 type, __u16 state)
311{
312 struct hci_conn_hash *h = &hdev->conn_hash;
313 struct list_head *p;
314 struct hci_conn *c;
315
316 list_for_each(p, &h->list) {
317 c = list_entry(p, struct hci_conn, list);
318 if (c->type == type && c->state == state)
319 return c;
320 }
321 return NULL;
322}
323
/* Low-level link establishment helpers (implemented in hci_conn.c). */
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

/* Connection object life cycle. */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

/* High-level connect and security management. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

/* Power mode transitions (active <-> sniff). */
void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

/* Device reference (conn->devref) management. */
void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
345
/* Take a reference on @conn and cancel any delayed-disconnect timer
 * that a previous hci_conn_put() may have armed. */
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
351
/* Drop a reference on @conn.  The last put does not tear the link down
 * immediately: it arms disc_timer so the actual disconnect is deferred.
 * For an established ACL link the delay is conn->disc_timeout ms
 * (doubled for incoming links); any other case uses a short 10 ms
 * delay.  A later hci_conn_hold() cancels the timer again. */
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			/* ACL is going away: its idle timer is obsolete. */
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;	/* be more patient with incoming links */
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
369
370
/* Kick the command tasklet to (re)start HCI command transmission. */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}
375
/* Kick the RX tasklet to process frames queued on hdev->rx_q. */
static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}
380
/* Kick the TX tasklet to (re)start data transmission. */
static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
385
386
/* Drop a device reference; the last one destroys the device through
 * the driver's destruct() callback.  Does not touch the module count
 * (see hci_dev_put for the module-aware variant). */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}
392
/* Drop a device reference and release the driver module pinned by
 * hci_dev_hold(). */
static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}
398
/* Take a device reference without pinning the driver module.
 * Always succeeds; returns @d for call chaining. */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}
404
405static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
406{
407 if (try_module_get(d->owner))
408 return __hci_dev_hold(d);
409 return NULL;
410}
411
/* Device state lock helpers.  The macro argument is parenthesized so
 * that any pointer expression works as @d. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
416
/* Device lookup and routing. */
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

/* Device allocation and (un)registration, used by transport drivers. */
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);

/* Device control, mostly backing the HCI socket ioctl interface
 * (hence the void __user pointers). */
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

/* HCI event dispatcher (hci_event.c). */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
439
440
441static inline int hci_recv_frame(struct sk_buff *skb)
442{
443 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
444 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
445 && !test_bit(HCI_INIT, &hdev->flags))) {
446 kfree_skb(skb);
447 return -ENXIO;
448 }
449
450
451 bt_cb(skb)->incoming = 1;
452
453
454 __net_timestamp(skb);
455
456
457 skb_queue_tail(&hdev->rx_q, skb);
458 hci_sched_rx(hdev);
459 return 0;
460}
461
/* Reassemble a partially received frame (uses hdev->reassembly). */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

/* sysfs representation of devices and connections. */
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

/* Attach the transport's struct device as parent of the HCI device. */
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
471
472
/* LMP feature tests against the features[] bitmap of a device or
 * connection (any struct with a compatible features[] member). */
#define lmp_rswitch_capable(dev)	((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)	((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)		((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev)	((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)		((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)		((dev)->features[6] & LMP_SIMPLE_PAIR)
479
480
/* Upper-layer protocol (L2CAP/SCO) registered with the HCI core via
 * hci_register_proto().  All callbacks are optional; callers check for
 * NULL before invoking them (see hci_proto_* helpers below). */
struct hci_proto {
	char		*name;
	unsigned int	id;		/* HCI_PROTO_* slot */
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
496
497static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
498{
499 register struct hci_proto *hp;
500 int mask = 0;
501
502 hp = hci_proto[HCI_PROTO_L2CAP];
503 if (hp && hp->connect_ind)
504 mask |= hp->connect_ind(hdev, bdaddr, type);
505
506 hp = hci_proto[HCI_PROTO_SCO];
507 if (hp && hp->connect_ind)
508 mask |= hp->connect_ind(hdev, bdaddr, type);
509
510 return mask;
511}
512
513static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
514{
515 register struct hci_proto *hp;
516
517 hp = hci_proto[HCI_PROTO_L2CAP];
518 if (hp && hp->connect_cfm)
519 hp->connect_cfm(conn, status);
520
521 hp = hci_proto[HCI_PROTO_SCO];
522 if (hp && hp->connect_cfm)
523 hp->connect_cfm(conn, status);
524}
525
526static inline int hci_proto_disconn_ind(struct hci_conn *conn)
527{
528 register struct hci_proto *hp;
529 int reason = 0x13;
530
531 hp = hci_proto[HCI_PROTO_L2CAP];
532 if (hp && hp->disconn_ind)
533 reason = hp->disconn_ind(conn);
534
535 hp = hci_proto[HCI_PROTO_SCO];
536 if (hp && hp->disconn_ind)
537 reason = hp->disconn_ind(conn);
538
539 return reason;
540}
541
542static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
543{
544 register struct hci_proto *hp;
545
546 hp = hci_proto[HCI_PROTO_L2CAP];
547 if (hp && hp->disconn_cfm)
548 hp->disconn_cfm(conn, reason);
549
550 hp = hci_proto[HCI_PROTO_SCO];
551 if (hp && hp->disconn_cfm)
552 hp->disconn_cfm(conn, reason);
553}
554
555static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
556{
557 register struct hci_proto *hp;
558 __u8 encrypt;
559
560 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
561 return;
562
563 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
564
565 hp = hci_proto[HCI_PROTO_L2CAP];
566 if (hp && hp->security_cfm)
567 hp->security_cfm(conn, status, encrypt);
568
569 hp = hci_proto[HCI_PROTO_SCO];
570 if (hp && hp->security_cfm)
571 hp->security_cfm(conn, status, encrypt);
572}
573
574static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
575{
576 register struct hci_proto *hp;
577
578 hp = hci_proto[HCI_PROTO_L2CAP];
579 if (hp && hp->security_cfm)
580 hp->security_cfm(conn, status, encrypt);
581
582 hp = hci_proto[HCI_PROTO_SCO];
583 if (hp && hp->security_cfm)
584 hp->security_cfm(conn, status, encrypt);
585}
586
/* Protocol (de)registration into the hci_proto[] slots. */
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
589
590
/* Callback listener for connection security events, kept on the global
 * hci_cb_list (guarded by hci_cb_list_lock); see hci_register_cb().
 * All callbacks are optional. */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
600
601static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
602{
603 struct list_head *p;
604 __u8 encrypt;
605
606 hci_proto_auth_cfm(conn, status);
607
608 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
609 return;
610
611 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
612
613 read_lock_bh(&hci_cb_list_lock);
614 list_for_each(p, &hci_cb_list) {
615 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
616 if (cb->security_cfm)
617 cb->security_cfm(conn, status, encrypt);
618 }
619 read_unlock_bh(&hci_cb_list_lock);
620}
621
622static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
623{
624 struct list_head *p;
625
626 if (conn->sec_level == BT_SECURITY_SDP)
627 conn->sec_level = BT_SECURITY_LOW;
628
629 hci_proto_encrypt_cfm(conn, status, encrypt);
630
631 read_lock_bh(&hci_cb_list_lock);
632 list_for_each(p, &hci_cb_list) {
633 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
634 if (cb->security_cfm)
635 cb->security_cfm(conn, status, encrypt);
636 }
637 read_unlock_bh(&hci_cb_list_lock);
638}
639
640static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
641{
642 struct list_head *p;
643
644 read_lock_bh(&hci_cb_list_lock);
645 list_for_each(p, &hci_cb_list) {
646 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
647 if (cb->key_change_cfm)
648 cb->key_change_cfm(conn, status);
649 }
650 read_unlock_bh(&hci_cb_list_lock);
651}
652
653static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
654{
655 struct list_head *p;
656
657 read_lock_bh(&hci_cb_list_lock);
658 list_for_each(p, &hci_cb_list) {
659 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
660 if (cb->role_switch_cfm)
661 cb->role_switch_cfm(conn, status, role);
662 }
663 read_unlock_bh(&hci_cb_list_lock);
664}
665
/* Callback listener (de)registration on hci_cb_list. */
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

/* Notifier chain for device events. */
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

/* Transmit paths: HCI commands, ACL data, SCO data. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

/* Parameter data of the last sent command, matched by @opcode. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

/* Emit a stack-internal event. */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);


/* Deliver a frame to listening HCI sockets. */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
682
683
/* Cast a struct sock pointer to its HCI socket private info.  The
 * argument is parenthesized so any expression works as @sk. */
#define hci_pi(sk) ((struct hci_pinfo *) (sk))
685
/* Per-socket state of an HCI socket; bt must stay first so that the
 * hci_pi() cast from struct sock is valid. */
struct hci_pinfo {
	struct bt_sock    bt;		/* common Bluetooth socket part, must be first */
	struct hci_dev    *hdev;	/* bound device, if any */
	struct hci_filter filter;	/* per-socket event/packet filter */
	__u32             cmsg_mask;	/* requested ancillary data */
};
692
693
/* Highest opcode group (OGF) covered by the security filter table. */
#define HCI_SFLT_MAX_OGF 5

/* Bitmaps describing which packet types, events and commands (OCF per
 * OGF) unprivileged HCI sockets may use. */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed packet types */
	__u32 event_mask[2];			/* allowed events */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed commands per OGF */
};
701
702
/* States of a synchronous HCI request (hdev->req_status). */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Request serialization.  The macro argument is parenthesized so that
 * any pointer expression works as @d. */
#define hci_req_lock(d)		mutex_lock(&(d)->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&(d)->req_lock)

/* Complete the pending request with @result and wake the waiter. */
void hci_req_complete(struct hci_dev *hdev, int result);
711
712#endif
713