1
2
3
4
5
6
7
8
9
10
11
12
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/usb.h>
19#include <linux/sched.h>
20#include <linux/kthread.h>
21#include <linux/usb/cdc.h>
22#include <linux/wait.h>
23#include <linux/if_ether.h>
24#include <linux/pm_runtime.h>
25
26#include "gdm_usb.h"
27#include "gdm_lte.h"
28#include "hci.h"
29#include "hci_packet.h"
30#include "gdm_endian.h"
31
/*
 * Match GCT LTE modems exposing a CDC Ethernet communications interface
 * (specific VID/PID plus interface class/subclass).
 */
#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

/*
 * Match the same devices when they enumerate as bulk-only SCSI mass
 * storage.  NOTE(review): no id_table entry currently uses this macro.
 */
#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

/* Devices handled by this driver. */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);
57
/* Workqueues driving do_tx()/do_rx(); created in gdm_usb_lte_init(). */
static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

/* Forward declaration: do_rx() re-arms reception via gdm_usb_recv(). */
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);
69
70static int request_mac_address(struct lte_udev *udev)
71{
72 u8 buf[16] = {0,};
73 struct hci_packet *hci = (struct hci_packet *)buf;
74 struct usb_device *usbdev = udev->usbdev;
75 int actual;
76 int ret = -1;
77
78 hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
79 hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
80 hci->data[0] = MAC_ADDRESS;
81
82 ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
83 &actual, 1000);
84
85 udev->request_mac_addr = 1;
86
87 return ret;
88}
89
90static struct usb_tx *alloc_tx_struct(int len)
91{
92 struct usb_tx *t = NULL;
93 int ret = 0;
94
95 t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
96 if (!t) {
97 ret = -ENOMEM;
98 goto out;
99 }
100
101 t->urb = usb_alloc_urb(0, GFP_ATOMIC);
102 if (!(len % 512))
103 len++;
104
105 t->buf = kmalloc(len, GFP_ATOMIC);
106 if (!t->urb || !t->buf) {
107 ret = -ENOMEM;
108 goto out;
109 }
110
111out:
112 if (ret < 0) {
113 if (t) {
114 usb_free_urb(t->urb);
115 kfree(t->buf);
116 kfree(t);
117 }
118 return NULL;
119 }
120
121 return t;
122}
123
124static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
125{
126 struct usb_tx_sdu *t_sdu;
127
128 t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_KERNEL);
129 if (!t_sdu)
130 return NULL;
131
132 t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
133 if (!t_sdu->buf) {
134 kfree(t_sdu);
135 return NULL;
136 }
137
138 return t_sdu;
139}
140
141static void free_tx_struct(struct usb_tx *t)
142{
143 if (t) {
144 usb_free_urb(t->urb);
145 kfree(t->buf);
146 kfree(t);
147 }
148}
149
150static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
151{
152 if (t_sdu) {
153 kfree(t_sdu->buf);
154 kfree(t_sdu);
155 }
156}
157
/*
 * Pop one usb_tx_sdu from the TX free pool, or NULL if the pool is
 * empty.  Caller must hold tx->lock (see gdm_usb_sdu_send()).
 * *no_spc is set to 1 when this removal drained the pool.
 */
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}
174
/*
 * Return a usb_tx_sdu to the TX free pool.  Caller must hold tx->lock
 * (see packet_aggregation()).
 */
static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}
180
181static struct usb_rx *alloc_rx_struct(void)
182{
183 struct usb_rx *r = NULL;
184 int ret = 0;
185
186 r = kmalloc(sizeof(struct usb_rx), GFP_KERNEL);
187 if (!r) {
188 ret = -ENOMEM;
189 goto out;
190 }
191
192 r->urb = usb_alloc_urb(0, GFP_KERNEL);
193 r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
194 if (!r->urb || !r->buf) {
195 ret = -ENOMEM;
196 goto out;
197 }
198out:
199
200 if (ret < 0) {
201 if (r) {
202 usb_free_urb(r->urb);
203 kfree(r->buf);
204 kfree(r);
205 }
206 return NULL;
207 }
208
209 return r;
210}
211
212static void free_rx_struct(struct usb_rx *r)
213{
214 if (r) {
215 usb_free_urb(r->urb);
216 kfree(r->buf);
217 kfree(r);
218 }
219}
220
/*
 * Pop one usb_rx from the RX free pool, or NULL if the pool is empty.
 * Takes rx->rx_lock itself (unlike get_tx_sdu_struct()).  *no_spc is
 * set to 1 when this removal drained the pool.
 */
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}
244
/*
 * Return a usb_rx to the RX free pool.  Takes rx->rx_lock itself, so
 * it is safe from both process and completion (softirq) context.
 */
static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}
256
/*
 * Tear down all TX/RX state for @udev: free pending and pooled TX
 * SDUs and HCI packets, kill in-flight RX URBs, and free every RX
 * buffer.  Called from init_usb() failure paths and disconnect.
 */
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	/* SDUs queued but never aggregated/sent */
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	/* HCI commands still awaiting transmission */
	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	/* unused preallocated SDU buffers */
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		/*
		 * Drop the lock around usb_kill_urb(): the completion
		 * handler path (remove_rx_submit_list()) takes
		 * submit_lock to unlink the entry itself.
		 */
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	/* buffers handed to do_rx() but not yet processed */
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
308
/*
 * Initialize the TX/RX contexts of a freshly probed device: list
 * heads, locks, preallocated SDU and RX buffer pools, and the
 * deferred TX/RX work items.  Returns 0, or -ENOMEM after releasing
 * any partially allocated state via release_usb().
 */
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;	/* no TX URB in flight yet */
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	/* preallocate the TX SDU pool */
	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (t_sdu == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	/* twice the submit depth so do_rx() can re-arm while processing */
	for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) {
		r = alloc_rx_struct();
		if (r == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}
367
368static int set_mac_address(u8 *data, void *arg)
369{
370 struct phy_dev *phy_dev = (struct phy_dev *)arg;
371 struct lte_udev *udev = phy_dev->priv_dev;
372 struct tlv *tlv = (struct tlv *)data;
373 u8 mac_address[ETH_ALEN] = {0, };
374
375 if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
376 memcpy(mac_address, tlv->data, tlv->len);
377
378 if (register_lte_device(phy_dev,
379 &udev->intf->dev, mac_address) < 0)
380 pr_err("register lte device failed\n");
381
382 udev->request_mac_addr = 0;
383
384 return 1;
385 }
386
387 return 0;
388}
389
390static void do_rx(struct work_struct *work)
391{
392 struct lte_udev *udev =
393 container_of(work, struct lte_udev, work_rx.work);
394 struct rx_cxt *rx = &udev->rx;
395 struct usb_rx *r;
396 struct hci_packet *hci;
397 struct phy_dev *phy_dev;
398 u16 cmd_evt;
399 int ret;
400 unsigned long flags;
401
402 while (1) {
403 spin_lock_irqsave(&rx->to_host_lock, flags);
404 if (list_empty(&rx->to_host_list)) {
405 spin_unlock_irqrestore(&rx->to_host_lock, flags);
406 break;
407 }
408 r = list_entry(rx->to_host_list.next,
409 struct usb_rx, to_host_list);
410 list_del(&r->to_host_list);
411 spin_unlock_irqrestore(&rx->to_host_lock, flags);
412
413 phy_dev = (struct phy_dev *)r->cb_data;
414 udev = (struct lte_udev *)phy_dev->priv_dev;
415 hci = (struct hci_packet *)r->buf;
416 cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);
417
418 switch (cmd_evt) {
419 case LTE_GET_INFORMATION_RESULT:
420 if (set_mac_address(hci->data, r->cb_data) == 0) {
421 ret = r->callback(r->cb_data,
422 r->buf,
423 r->urb->actual_length,
424 KERNEL_THREAD);
425 }
426 break;
427
428 default:
429 if (r->callback) {
430 ret = r->callback(r->cb_data,
431 r->buf,
432 r->urb->actual_length,
433 KERNEL_THREAD);
434
435 if (ret == -EAGAIN)
436 pr_err("failed to send received data\n");
437 }
438 break;
439 }
440
441 put_rx_struct(rx, r);
442
443 gdm_usb_recv(udev,
444 r->callback,
445 r->cb_data,
446 USB_COMPLETE);
447 }
448}
449
450static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
451{
452 unsigned long flags;
453 struct usb_rx *r_remove, *r_remove_next;
454
455 spin_lock_irqsave(&rx->submit_lock, flags);
456 list_for_each_entry_safe(r_remove, r_remove_next,
457 &rx->rx_submit_list, rx_submit_list) {
458 if (r == r_remove) {
459 list_del(&r->rx_submit_list);
460 break;
461 }
462 }
463 spin_unlock_irqrestore(&rx->submit_lock, flags);
464}
465
/*
 * RX URB completion handler.  On success, queues the buffer on the
 * to-host list and kicks do_rx(); on error (or no callback) returns
 * the buffer to the free pool.
 */
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		/* URB errors are expected while suspended; stay quiet then */
		if (urb->status && udev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);

		put_rx_struct(rx, r);
	}

	/* keep runtime autosuspend from firing while traffic flows */
	usb_mark_last_busy(usbdev);
}
491
/*
 * Arm one bulk-in receive on endpoint 0x83.  @cb is later invoked from
 * do_rx() with the received data; @context selects GFP_KERNEL
 * (KERNEL_THREAD) vs GFP_ATOMIC (completion context) for submission.
 *
 * Returns 0, -ENODEV on a vanished device, -ENOMEM when the RX pool is
 * exhausted, or the usb_submit_urb() error.
 */
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	/* remembered so resume can re-arm receives with the same cb */
	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	/* track the in-flight URB so suspend/disconnect can kill it */
	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}
551
/*
 * TX URB completion handler.  Runs the sender's callback, frees the
 * usb_tx, then re-enables transmission (send_complete) and re-queues
 * the TX worker.  Also called directly from do_tx() on submit failure.
 */
static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	/*
	 * -ECONNRESET means the URB was unlinked (usb_kill_urb).
	 * NOTE(review): t is not freed and send_complete is not re-set
	 * on this path — presumably the device is being torn down, but
	 * this looks like a leak on unlink; confirm against disconnect.
	 */
	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}
574
575static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
576{
577 int ret = 0;
578
579 if (!(len%512))
580 len++;
581
582 usb_fill_bulk_urb(t->urb,
583 usbdev,
584 usb_sndbulkpipe(usbdev, 2),
585 t->buf,
586 len,
587 gdm_usb_send_complete,
588 t);
589
590 ret = usb_submit_urb(t->urb, GFP_ATOMIC);
591
592 if (ret)
593 pr_err("usb_submit_urb failed: %d\n", ret);
594
595 usb_mark_last_busy(usbdev);
596
597 return ret;
598}
599
/*
 * Drain tx->sdu_list into @send_buf as one LTE_TX_MULTI_SDU packet.
 * SDUs are copied back-to-back with each offset advanced to the next
 * 4-byte boundary.  Stops after MAX_PACKET_IN_MULTI_SDU entries or
 * when adding the next SDU would exceed MAX_SDU_SIZE.
 *
 * Returns the total byte count to transmit (header + payload).
 */
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		/* advance to the next 4-byte-aligned offset */
		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		/*
		 * NOTE(review): the completion callback is only invoked
		 * while more than 10 pool entries remain — presumably a
		 * flow-control throttle on the netdev side; confirm what
		 * happens to cb_data when the callback is skipped.
		 */
		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}
645
/*
 * TX worker.  Sends at most one URB per invocation: a queued HCI
 * command takes priority, otherwise pending SDUs are aggregated into a
 * freshly allocated multi-SDU buffer.  send_complete serializes URBs:
 * cleared here, set again by gdm_usb_send_complete(), which also
 * re-queues this work.
 */
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	/*
	 * Wake the device; usb_autopm_get_interface() returns 0 on
	 * success, in which case the reference is dropped immediately
	 * (autosuspend is then held off by usb_mark_last_busy() in the
	 * send path).
	 */
	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		/* a previous URB is still in flight; its completion re-queues us */
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		/* HCI commands take priority over data SDUs */
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			/* flow-controlled: leave SDUs queued */
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (t == NULL) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		/* fake a completion so t is freed and TX is re-enabled */
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}
710
/* bytes of SDU header params (dftEpsId, bearer_ID, nic_type) */
#define SDU_PARAM_LEN 12
/*
 * Queue one data SDU for transmission and kick the TX worker.  For
 * non-ARP frames the Ethernet header is stripped before copying.
 *
 * Returns 0, TX_NO_DEV (device gone), TX_NO_SPC (pool empty), or
 * TX_NO_BUFFER (queued, but the pool is now drained — caller should
 * throttle).
 *
 * NOTE(review): @len is copied into the SDU_BUF_SIZE buffer without an
 * explicit bound check — presumably callers cap at MTU; confirm.
 */
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t_sdu == NULL) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		/* ARP frames are sent whole, Ethernet header included */
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		/* strip the Ethernet header for regular IP traffic */
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}
769
/*
 * Queue one HCI control packet for transmission and kick the TX
 * worker.  HCI packets bypass SDU aggregation (do_tx() sends them with
 * priority).  Returns 0, -ENODEV, or -ENOMEM.
 */
static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (t == NULL) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
803
804static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
805{
806 struct lte_udev *udev = priv_dev;
807
808 return &udev->gdm_ed;
809}
810
/*
 * Probe: allocate the phy_dev/lte_udev pair, wire up the transport
 * ops, initialize the USB buffer pools, enable runtime autosuspend,
 * pick the device endianness (GDM7243 is big-endian), request the MAC
 * address and start RX processing.
 */
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	/* only bind the network interface(s) of the composite device */
	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		pr_err("init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);




	/* GDM7243 talks big-endian on the wire; GDM7240 little-endian */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		pr_err("request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}
889
890static void gdm_usb_disconnect(struct usb_interface *intf)
891{
892 struct phy_dev *phy_dev;
893 struct lte_udev *udev;
894 u16 idVendor, idProduct;
895 struct usb_device *usbdev;
896
897 usbdev = interface_to_usbdev(intf);
898
899 idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
900 idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
901
902 phy_dev = usb_get_intfdata(intf);
903
904 udev = phy_dev->priv_dev;
905 unregister_lte_device(phy_dev);
906
907 release_usb(udev);
908
909 kfree(udev);
910 udev = NULL;
911
912 kfree(phy_dev);
913 phy_dev = NULL;
914
915 usb_put_dev(usbdev);
916}
917
/*
 * Suspend: mark PM_SUSPEND (silences completion-handler error logging
 * and stops do_tx()) and kill every in-flight RX URB.  submit_lock is
 * dropped around usb_kill_urb() because the completion path takes it
 * too.  Returns -1 if not currently in PM_NORMAL.
 */
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}
948
/*
 * Resume (also used for reset_resume): re-arm receive URBs until
 * MAX_RX_SUBMIT_COUNT are in flight (the pool holds twice that many
 * buffers) and kick the TX worker.  Returns -1 if not in PM_SUSPEND.
 */
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		/* rx_cb was saved by gdm_usb_recv() before suspend */
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
988
/* USB driver glue; reset_resume reuses the ordinary resume path. */
static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};
999
1000static int __init gdm_usb_lte_init(void)
1001{
1002 if (gdm_lte_event_init() < 0) {
1003 pr_err("error creating event\n");
1004 return -1;
1005 }
1006
1007 usb_tx_wq = create_workqueue("usb_tx_wq");
1008 if (usb_tx_wq == NULL)
1009 return -1;
1010
1011 usb_rx_wq = create_workqueue("usb_rx_wq");
1012 if (usb_rx_wq == NULL)
1013 return -1;
1014
1015 return usb_register(&gdm_usb_lte_driver);
1016}
1017
1018static void __exit gdm_usb_lte_exit(void)
1019{
1020 gdm_lte_event_exit();
1021
1022 usb_deregister(&gdm_usb_lte_driver);
1023
1024 if (usb_tx_wq) {
1025 flush_workqueue(usb_tx_wq);
1026 destroy_workqueue(usb_tx_wq);
1027 }
1028
1029 if (usb_rx_wq) {
1030 flush_workqueue(usb_rx_wq);
1031 destroy_workqueue(usb_rx_wq);
1032 }
1033}
1034
/* Module entry points and metadata. */
module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");
1041