1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#include <linux/kernel.h>
32#include <linux/debugfs.h>
33
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
37#include "hci_uart.h"
38#include "btqca.h"
39
40
/* HCI_IBS (In-Band-Sleep) protocol single-byte messages exchanged with the
 * controller over the UART.
 */
#define HCI_IBS_SLEEP_IND 0xFE
#define HCI_IBS_WAKE_IND 0xFD
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10

/* Bit in qca_data->flags: set once firmware setup succeeded and IBS is live */
#define STATE_IN_BAND_SLEEP_ENABLED 1

#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_TX_IDLE_TIMEOUT_MS 2000
#define BAUDRATE_SETTLE_TIMEOUT_MS 300
52
53
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,	/* device is asleep; queue tx in tx_wait_q */
	HCI_IBS_TX_WAKING,	/* WAKE_IND sent, waiting for WAKE_ACK */
	HCI_IBS_TX_AWAKE,	/* device awake; transmit directly */
};
59
60
/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,	/* remote tx path is asleep */
	HCI_IBS_RX_AWAKE,	/* remote tx path is awake */
};
65
66
/* HCI_IBS serial clock vote operations, see serial_clock_vote() */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,	/* only refresh on/off time accounting */
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};
74
struct qca_data {
	struct hci_uart *hu;		/* back-pointer to owning UART */
	struct sk_buff *rx_skb;		/* frame under reassembly in qca_recv() */
	struct sk_buff_head txq;	/* packets ready to go on the wire */
	struct sk_buff_head tx_wait_q;	/* packets held until device wakes */
	spinlock_t hci_ibs_lock;	/* protects the IBS state machine */
	u8 tx_ibs_state;		/* enum tx_ibs_states */
	u8 rx_ibs_state;		/* enum rx_states */
	bool tx_vote;			/* tx path's serial-clock vote */
	bool rx_vote;			/* rx path's serial-clock vote */
	struct timer_list tx_idle_timer;	/* send SLEEP_IND after idle */
	u32 tx_idle_delay;		/* tx idle timeout, ms */
	struct timer_list wake_retrans_timer;	/* re-send WAKE_IND */
	u32 wake_retrans;		/* WAKE_IND retransmit interval, ms */
	struct workqueue_struct *workqueue;	/* ordered wq for the works below */
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;		/* STATE_IN_BAND_SLEEP_ENABLED bit */

	/* debugfs-exported statistics; written under hci_ibs_lock or in
	 * serial_clock_vote()
	 */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;	/* jiffies of last clock vote transition */
	u32 vote_on_ms;		/* accumulated clock-on time */
	u32 vote_off_ms;	/* accumulated clock-off time */
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;		/* aggregate off->on transitions */
	u64 votes_off;		/* aggregate on->off transitions */
};
113
/* Assert the UART clock for @tty.
 * Intentionally empty here: on platforms without explicit clock control the
 * vote accounting in serial_clock_vote() is all that is needed.
 */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: platform-specific clock enable hook */
}
121
/* Release the UART clock for @tty; empty counterpart of __serial_clock_on() */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: platform-specific clock disable hook */
}
129
130
/* serial_clock_vote() - cast or update a serial-clock vote.
 * @vote: one of enum hci_ibs_clock_state_vote
 * @hu:   the UART whose clock is voted on
 *
 * The clock stays on while either the tx or rx path votes for it
 * (tx_vote | rx_vote).  The function also maintains the on/off duration
 * and transition-count statistics exported through debugfs.
 *
 * NOTE(review): callers appear to rely on external serialization (work
 * items on an ordered workqueue / hci_ibs_lock) — confirm before adding
 * new call sites.
 */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		/* Pure accounting refresh: attribute the time since the last
		 * transition to the current state, change nothing else.
		 */
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	/* Only act (and account) on an actual aggregate transition */
	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		/* NOTE(review): second argument prints the enum value's
		 * truthiness, not a meaningful on/off string
		 */
		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}
199
200
201
202
203static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
204{
205 int err = 0;
206 struct sk_buff *skb = NULL;
207 struct qca_data *qca = hu->priv;
208
209 BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
210
211 skb = bt_skb_alloc(1, GFP_ATOMIC);
212 if (!skb) {
213 BT_ERR("Failed to allocate memory for HCI_IBS packet");
214 return -ENOMEM;
215 }
216
217
218 skb_put_u8(skb, cmd);
219
220 skb_queue_tail(&qca->txq, skb);
221
222 return err;
223}
224
/* Work handler: wake the remote device so queued tx can proceed.
 * Scheduled from qca_enqueue() when a packet arrives in HCI_IBS_TX_ASLEEP.
 * Votes the serial clock on, sends WAKE_IND, and arms the retransmit
 * timer in case the WAKE_ACK never arrives.
 */
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote the clock on before touching the wire */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer; re-fires until device_woke_up() stops it */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the queued WAKE_IND byte */
	hci_uart_tx_wakeup(hu);
}
254
/* Work handler: the device asked to wake up (WAKE_IND received while our
 * rx side was asleep).  Vote the clock on, mark rx awake and acknowledge
 * with WAKE_ACK.
 */
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge the device's wake-up request, even if we were
	 * already transitioning
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Push the queued WAKE_ACK out */
	hci_uart_tx_wakeup(hu);
}
281
282static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
283{
284 struct qca_data *qca = container_of(work, struct qca_data,
285 ws_rx_vote_off);
286 struct hci_uart *hu = qca->hu;
287
288 BT_DBG("hu %p rx clock vote off", hu);
289
290 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
291}
292
293static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
294{
295 struct qca_data *qca = container_of(work, struct qca_data,
296 ws_tx_vote_off);
297 struct hci_uart *hu = qca->hu;
298
299 BT_DBG("hu %p tx clock vote off", hu);
300
301
302 hci_uart_tx_wakeup(hu);
303
304
305
306
307 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
308}
309
/* Timer callback: the tx path has been idle for tx_idle_delay ms.
 * If we are still awake, tell the device we are going to sleep
 * (SLEEP_IND), move to HCI_IBS_TX_ASLEEP and schedule the clock-vote-off
 * work.  Runs in timer (softirq) context, hence irqsave locking.
 */
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Queue SLEEP_IND; on failure stay awake and retry on the
		 * next idle timeout
		 */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Timer should have been stopped in these states */
		/* Fall through */
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
344
345static void hci_ibs_wake_retrans_timeout(unsigned long arg)
346{
347 struct hci_uart *hu = (struct hci_uart *)arg;
348 struct qca_data *qca = hu->priv;
349 unsigned long flags, retrans_delay;
350 bool retransmit = false;
351
352 BT_DBG("hu %p wake retransmit timeout in %d state",
353 hu, qca->tx_ibs_state);
354
355 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
356 flags, SINGLE_DEPTH_NESTING);
357
358 switch (qca->tx_ibs_state) {
359 case HCI_IBS_TX_WAKING:
360
361 retransmit = true;
362 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
363 BT_ERR("Failed to acknowledge device wake up");
364 break;
365 }
366 qca->ibs_sent_wakes++;
367 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
368 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
369 break;
370
371 case HCI_IBS_TX_ASLEEP:
372 case HCI_IBS_TX_AWAKE:
373
374
375 default:
376 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
377 break;
378 }
379
380 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
381
382 if (retransmit)
383 hci_uart_tx_wakeup(hu);
384}
385
386
387static int qca_open(struct hci_uart *hu)
388{
389 struct qca_data *qca;
390
391 BT_DBG("hu %p qca_open", hu);
392
393 qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
394 if (!qca)
395 return -ENOMEM;
396
397 skb_queue_head_init(&qca->txq);
398 skb_queue_head_init(&qca->tx_wait_q);
399 spin_lock_init(&qca->hci_ibs_lock);
400 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
401 if (!qca->workqueue) {
402 BT_ERR("QCA Workqueue not initialized properly");
403 kfree(qca);
404 return -ENOMEM;
405 }
406
407 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
408 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
409 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
410 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
411
412 qca->hu = hu;
413
414
415 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
416 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
417
418
419 qca->tx_vote = false;
420 qca->rx_vote = false;
421 qca->flags = 0;
422
423 qca->ibs_sent_wacks = 0;
424 qca->ibs_sent_slps = 0;
425 qca->ibs_sent_wakes = 0;
426 qca->ibs_recv_wacks = 0;
427 qca->ibs_recv_slps = 0;
428 qca->ibs_recv_wakes = 0;
429 qca->vote_last_jif = jiffies;
430 qca->vote_on_ms = 0;
431 qca->vote_off_ms = 0;
432 qca->votes_on = 0;
433 qca->votes_off = 0;
434 qca->tx_votes_on = 0;
435 qca->tx_votes_off = 0;
436 qca->rx_votes_on = 0;
437 qca->rx_votes_off = 0;
438
439 hu->priv = qca;
440
441 setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
442 (u_long)hu);
443 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
444
445 setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
446 qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
447
448 BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
449 qca->tx_idle_delay, qca->wake_retrans);
450
451 return 0;
452}
453
/* Create the debugfs "ibs" directory exposing IBS state, statistics and
 * the two tunable timeouts.  No-op when the hdev has no debugfs root.
 * debugfs_create_* failures are intentionally ignored (debug-only).
 */
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* Read-only statistics and current state */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* Root-writable tunables */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
499
500
501static int qca_flush(struct hci_uart *hu)
502{
503 struct qca_data *qca = hu->priv;
504
505 BT_DBG("hu %p qca flush", hu);
506
507 skb_queue_purge(&qca->tx_wait_q);
508 skb_queue_purge(&qca->txq);
509
510 return 0;
511}
512
513
514static int qca_close(struct hci_uart *hu)
515{
516 struct qca_data *qca = hu->priv;
517
518 BT_DBG("hu %p qca close", hu);
519
520 serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
521
522 skb_queue_purge(&qca->tx_wait_q);
523 skb_queue_purge(&qca->txq);
524 del_timer(&qca->tx_idle_timer);
525 del_timer(&qca->wake_retrans_timer);
526 destroy_workqueue(qca->workqueue);
527 qca->hu = NULL;
528
529 kfree_skb(qca->rx_skb);
530
531 hu->priv = NULL;
532
533 kfree(qca);
534
535 return 0;
536}
537
538
539
/* Handle an incoming HCI_IBS_WAKE_IND: the controller wants to transmit.
 * If our rx side is asleep, defer the clock vote + WAKE_ACK to the
 * ordered workqueue (we may be in atomic rx context); if already awake,
 * just acknowledge inline.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Wake-up sequence (clock vote + state change + WAKE_ACK)
		 * happens in qca_wq_awake_rx(); nothing more to do here,
		 * so return without the trailing tx kick.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Already awake — the device may have missed our earlier
		 * ACK, so acknowledge again.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Push the queued WAKE_ACK out */
	hci_uart_tx_wakeup(hu);
}
583
584
585
586static void device_want_to_sleep(struct hci_uart *hu)
587{
588 unsigned long flags;
589 struct qca_data *qca = hu->priv;
590
591 BT_DBG("hu %p want to sleep", hu);
592
593 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
594
595 qca->ibs_recv_slps++;
596
597 switch (qca->rx_ibs_state) {
598 case HCI_IBS_RX_AWAKE:
599
600 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
601
602 queue_work(qca->workqueue, &qca->ws_rx_vote_off);
603 break;
604
605 case HCI_IBS_RX_ASLEEP:
606
607
608 default:
609
610 BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
611 qca->rx_ibs_state);
612 break;
613 }
614
615 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
616}
617
618
619
/* Handle an incoming HCI_IBS_WAKE_ACK: the controller confirmed our
 * WAKE_IND.  Flush everything parked in tx_wait_q onto the wire queue,
 * stop the retransmit timer, arm the idle timer and go TX_AWAKE.
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expected duplicate ACK (e.g. after a retransmitted
		 * WAKE_IND); log at debug level only.
		 */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers: retransmission no longer needed,
		 * idle detection starts now
		 */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Unexpected — we never asked to wake up */
		/* Fall through */
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Transmit the packets just moved onto txq */
	hci_uart_tx_wakeup(hu);
}
665
666
667
668
/* Enqueue an HCI frame for transmission, driving the IBS tx state
 * machine: transmit directly when awake, park on tx_wait_q and start the
 * wake-up sequence when asleep.  Always returns 0 (the skb is either
 * queued or, in an illegal state, freed).
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend the H4 packet-type byte */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Don't go through the IBS state machine before firmware setup has
	 * enabled in-band sleep — just transmit directly.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		/* Each transmission pushes the idle timeout further out */
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Hold the packet until WAKE_ACK arrives */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* WAKE_IND + clock vote handled by the workqueue */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Wake-up already in flight; just park the packet */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}
726
727static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
728{
729 struct hci_uart *hu = hci_get_drvdata(hdev);
730
731 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
732
733 device_want_to_sleep(hu);
734
735 kfree_skb(skb);
736 return 0;
737}
738
739static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
740{
741 struct hci_uart *hu = hci_get_drvdata(hdev);
742
743 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
744
745 device_want_to_wakeup(hu);
746
747 kfree_skb(skb);
748 return 0;
749}
750
751static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
752{
753 struct hci_uart *hu = hci_get_drvdata(hdev);
754
755 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
756
757 device_woke_up(hu);
758
759 kfree_skb(skb);
760 return 0;
761}
762
/* h4_recv_pkt descriptors for the three single-byte IBS messages: no
 * header, no embedded length field (hlen/loff/lsize all zero).
 */
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE
783
/* Packet dispatch table for h4_recv_buf(): regular HCI traffic goes to
 * the core, IBS messages to the local state-machine handlers.
 */
static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
792
/* Feed @count received bytes into the H4 reassembler.  Returns @count on
 * success, -EUNATCH if the hdev is not registered yet, or the reassembly
 * error (resetting rx_skb so the next chunk starts a fresh frame).
 */
static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		/* Drop the partial frame; resume cleanly on the next chunk */
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}
811
812static struct sk_buff *qca_dequeue(struct hci_uart *hu)
813{
814 struct qca_data *qca = hu->priv;
815
816 return skb_dequeue(&qca->txq);
817}
818
819static uint8_t qca_get_baudrate_value(int speed)
820{
821 switch (speed) {
822 case 9600:
823 return QCA_BAUDRATE_9600;
824 case 19200:
825 return QCA_BAUDRATE_19200;
826 case 38400:
827 return QCA_BAUDRATE_38400;
828 case 57600:
829 return QCA_BAUDRATE_57600;
830 case 115200:
831 return QCA_BAUDRATE_115200;
832 case 230400:
833 return QCA_BAUDRATE_230400;
834 case 460800:
835 return QCA_BAUDRATE_460800;
836 case 500000:
837 return QCA_BAUDRATE_500000;
838 case 921600:
839 return QCA_BAUDRATE_921600;
840 case 1000000:
841 return QCA_BAUDRATE_1000000;
842 case 2000000:
843 return QCA_BAUDRATE_2000000;
844 case 3000000:
845 return QCA_BAUDRATE_3000000;
846 case 3500000:
847 return QCA_BAUDRATE_3500000;
848 default:
849 return QCA_BAUDRATE_115200;
850 }
851}
852
853static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
854{
855 struct hci_uart *hu = hci_get_drvdata(hdev);
856 struct qca_data *qca = hu->priv;
857 struct sk_buff *skb;
858 u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
859
860 if (baudrate > QCA_BAUDRATE_3000000)
861 return -EINVAL;
862
863 cmd[4] = baudrate;
864
865 skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
866 if (!skb) {
867 BT_ERR("Failed to allocate memory for baudrate packet");
868 return -ENOMEM;
869 }
870
871
872 skb_put_data(skb, cmd, sizeof(cmd));
873 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
874
875 skb_queue_tail(&qca->txq, skb);
876 hci_uart_tx_wakeup(hu);
877
878
879
880
881
882 set_current_state(TASK_UNINTERRUPTIBLE);
883 schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
884 set_current_state(TASK_INTERRUPTIBLE);
885
886 return 0;
887}
888
/* Protocol setup: bring the ROME controller up at the initial speed,
 * switch both sides to the operating speed if one is configured, then run
 * firmware download.  IBS is enabled (and debugfs created) only when
 * firmware setup succeeds.
 */
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate (host side only) */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		/* Controller first, then the host UART */
		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	}

	/* Setup bdaddr — installed even on firmware failure so the address
	 * can still be set
	 */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}
943
/* hci_uart protocol descriptor for QCA ROME (manufacturer 29 = Qualcomm) */
static struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,	/* firmware download speed */
	.oper_speed	= 3000000,	/* default operating speed */
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};
958
/* Register the QCA protocol with the hci_uart core (module init path) */
int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}
963
/* Unregister the QCA protocol (module exit path) */
int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}
968