1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <asm/unaligned.h>
24
/* Take/release the device's req_lock mutex around synchronous request
 * processing.  The macro argument is parenthesized so that expressions
 * such as "cond ? a : b" expand safely.
 */
#define hci_req_sync_lock(hdev) mutex_lock(&(hdev)->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&(hdev)->req_lock)
27
/* A batch of HCI commands built up with hci_req_add*() and then
 * submitted as one unit with hci_req_run*() (declared below).
 */
struct hci_request {
	struct hci_dev *hdev;		/* device the commands target */
	struct sk_buff_head cmd_q;	/* queued, not-yet-sent command skbs */

	/* NOTE(review): the blank space here likely held a comment that
	 * was lost; err appears to latch a failure encountered while
	 * building the request — TODO confirm against hci_req_add()'s
	 * definition.
	 */

	int err;
};
37
/* Prepare 'req' to batch commands for 'hdev'. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
/* Submit the queued commands; 'complete' runs when the request finishes
 * (status-only variant vs. the skb-carrying variant below).
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
/* Queue one HCI command (opcode + plen bytes of param) on the request. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param);
/* As hci_req_add(), with an extra 'event' argument — presumably the HCI
 * event completion is matched against; TODO confirm at the definition.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event);
/* Resolve the completion callbacks registered for 'opcode'. */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb);

/* Synchronous execution: 'req'/'func' builds the request (receiving
 * 'opt' opaquely), the call waits up to 'timeout' and, if hci_status is
 * non-NULL, presumably stores the HCI status there.  The __ variant is
 * likely the req_lock-already-held form — TODO confirm both.
 */
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status);
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status);
void hci_req_sync_cancel(struct hci_dev *hdev, int err);

/* Allocate an skb containing a fully formed HCI command packet. */
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param);
59
/* Per-setting request builders.  By the naming pattern in this file the
 * double-underscore variants append commands to a caller-provided
 * request, while the plain variants act on the hdev directly.
 */
int __hci_req_hci_power_on(struct hci_dev *hdev);

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req);

/* LE scanning control. */
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);

/* Advertising control, including per-instance advertising data. */
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force);
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force);

/* Class of Device update. */
void __hci_req_update_class(struct hci_request *req);

/* NOTE(review): bool return presumably indicates whether stop commands
 * were actually queued — confirm at the definition.
 */
bool hci_req_stop_discovery(struct hci_request *req);
86
/* Queue hdev->scan_update on the device's request workqueue so the
 * update runs asynchronously from worker context instead of in the
 * caller.  NOTE(review): the work handler is wired up elsewhere
 * (presumably hci_request_setup()) — confirm it invokes
 * __hci_req_update_scan().
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}
91
void __hci_req_update_scan(struct hci_request *req);

/* Choose/program the LE own-address type, returned via *own_addr_type.
 * NOTE(review): semantics inferred from the parameter names
 * (require_privacy/use_rpa) — confirm against the definition.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type);

/* Tear down a connection with the given HCI reason code. */
int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason);
100
/* Queue hdev->bg_scan_update on the request workqueue; the handler
 * (wired up elsewhere, presumably in hci_request_setup()) re-evaluates
 * background LE scanning — TODO confirm the handler wiring.
 */
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}
105
/* One-time wiring and teardown of the per-device request work items. */
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);

/* Append the local name as an EIR/AD field at 'ptr' within 'ad_len'
 * bytes, returning the resulting length.  NOTE(review): behavior
 * inferred from the signature — confirm shortening rules at the
 * definition.
 */
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
110
/* Append one EIR/AD structure ([length][type][payload]) at offset
 * eir_len and return the new total length.  The length octet covers the
 * type octet plus the payload but not itself.  No bounds checking is
 * performed; the caller must ensure the buffer has room.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
				  u8 *data, u8 data_len)
{
	u8 *dst = eir + eir_len;

	dst[0] = sizeof(type) + data_len;
	dst[1] = type;
	memcpy(dst + 2, data, data_len);

	return eir_len + 2 + data_len;
}
121
/* Append an EIR/AD structure whose payload is a 16-bit little-endian
 * value.  The length octet covers the type octet and both payload
 * bytes.  Byte-wise stores keep the write endian- and alignment-safe,
 * matching what put_unaligned_le16() produces.
 */
static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
{
	u8 *dst = eir + eir_len;

	dst[0] = sizeof(type) + sizeof(data);
	dst[1] = type;
	dst[2] = (u8)(data & 0xff);
	dst[3] = (u8)(data >> 8);

	return eir_len + 2 + sizeof(data);
}
131