1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79#define HDLC_MAGIC 0x239e
80
81#include <linux/module.h>
82#include <linux/init.h>
83#include <linux/kernel.h>
84#include <linux/sched.h>
85#include <linux/types.h>
86#include <linux/fcntl.h>
87#include <linux/interrupt.h>
88#include <linux/ptrace.h>
89
90#include <linux/poll.h>
91#include <linux/in.h>
92#include <linux/ioctl.h>
93#include <linux/slab.h>
94#include <linux/tty.h>
95#include <linux/errno.h>
96#include <linux/string.h>
97#include <linux/signal.h>
98#include <linux/if.h>
99#include <linux/bitops.h>
100
101#include <asm/termios.h>
102#include <linux/uaccess.h>
103#include "tty.h"
104
105
106
107
#define MAX_HDLC_FRAME_SIZE 65535	/* upper clamp for maxframe module param */
#define DEFAULT_RX_BUF_COUNT 10		/* rx buffers preallocated at open */
#define MAX_RX_BUF_COUNT 60		/* hard limit on queued rx buffers */
#define DEFAULT_TX_BUF_COUNT 3		/* tx buffers preallocated at open */
112
/*
 * One HDLC frame: queued on one of the four per-instance buffer lists.
 */
struct n_hdlc_buf {
	struct list_head list_item;	/* link within owning n_hdlc_buf_list */
	int count;			/* number of valid bytes in buf[] */
	char buf[];			/* frame data (flexible array member) */
};
118
/*
 * FIFO list of n_hdlc_buf frames, protected by its own spinlock.
 */
struct n_hdlc_buf_list {
	struct list_head list;		/* head of buffer list */
	int count;			/* number of buffers on the list */
	spinlock_t spinlock;		/* protects list and count */
};
124
125
126
127
128
129
130
131
132
133
134
/*
 * Per-tty instance data for the HDLC line discipline, hung off
 * tty->disc_data.
 */
struct n_hdlc {
	int magic;			/* HDLC_MAGIC sanity check */
	bool tbusy;			/* true while a context is inside n_hdlc_send_frames() */
	bool woke_up;			/* set when a wakeup arrived while tbusy */
	struct n_hdlc_buf_list tx_buf_list;	/* frames waiting to be sent */
	struct n_hdlc_buf_list rx_buf_list;	/* received frames waiting for read() */
	struct n_hdlc_buf_list tx_free_buf_list;	/* free tx buffers */
	struct n_hdlc_buf_list rx_free_buf_list;	/* free rx buffers */
};
144
145
146
147
148static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
149 struct n_hdlc_buf *buf);
150static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
151 struct n_hdlc_buf *buf);
152static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
153
154
155
156static struct n_hdlc *n_hdlc_alloc(void);
157
158
/* max frame size for allocations; module param, clamped in n_hdlc_init() */
static int maxframe = 4096;
160
161static void flush_rx_queue(struct tty_struct *tty)
162{
163 struct n_hdlc *n_hdlc = tty->disc_data;
164 struct n_hdlc_buf *buf;
165
166 while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
167 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
168}
169
170static void flush_tx_queue(struct tty_struct *tty)
171{
172 struct n_hdlc *n_hdlc = tty->disc_data;
173 struct n_hdlc_buf *buf;
174
175 while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
176 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
177}
178
179static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
180{
181 struct n_hdlc_buf *buf;
182
183 do {
184 buf = n_hdlc_buf_get(list);
185 kfree(buf);
186 } while (buf);
187}
188
189
190
191
192
193
194
195
/*
 * n_hdlc_tty_close - line discipline close
 * @tty: pointer to tty info structure
 *
 * Called when the line discipline is changed to something else, the tty is
 * closed, or the tty detects a hangup. Detaches the instance data from the
 * tty, wakes any sleeping readers/writers, and frees all buffers.
 */
static void n_hdlc_tty_close(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;

	/* sanity check: this tty should have been opened with this ldisc */
	if (n_hdlc->magic != HDLC_MAGIC) {
		pr_warn("n_hdlc: trying to close unopened tty!\n");
		return;
	}
#if defined(TTY_NO_WRITE_SPLIT)
	/* undo the no-split mode requested in n_hdlc_tty_open() */
	clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
	/* detach before waking sleepers so they see the ldisc going away */
	tty->disc_data = NULL;


	wake_up_interruptible(&tty->read_wait);
	wake_up_interruptible(&tty->write_wait);

	/* release every buffer on all four lists, then the instance itself */
	n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->tx_buf_list);
	kfree(n_hdlc);
}
219
220
221
222
223
224
225
226static int n_hdlc_tty_open(struct tty_struct *tty)
227{
228 struct n_hdlc *n_hdlc = tty->disc_data;
229
230 pr_debug("%s() called (device=%s)\n", __func__, tty->name);
231
232
233 if (n_hdlc) {
234 pr_err("%s: tty already associated!\n", __func__);
235 return -EEXIST;
236 }
237
238 n_hdlc = n_hdlc_alloc();
239 if (!n_hdlc) {
240 pr_err("%s: n_hdlc_alloc failed\n", __func__);
241 return -ENFILE;
242 }
243
244 tty->disc_data = n_hdlc;
245 tty->receive_room = 65536;
246
247
248 set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
249
250
251 tty_driver_flush_buffer(tty);
252
253 return 0;
254
255}
256
257
258
259
260
261
262
263
264
265
/*
 * n_hdlc_send_frames - push frames from the tx queue into the tty driver
 * @n_hdlc: pointer to ldisc instance data
 * @tty: pointer to tty instance data
 *
 * Sends frames until the queue is empty or the driver does not accept a
 * whole frame. Called after queueing a frame in n_hdlc_tty_write() and from
 * the tty write wakeup callback. Re-entry is serialized with the tbusy
 * flag; a contending caller sets woke_up so the active sender loops again.
 */
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
	register int actual;
	unsigned long flags;
	struct n_hdlc_buf *tbuf;

check_again:
	/* claim the sender role, or record that the sender must re-check */
	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	if (n_hdlc->tbusy) {
		n_hdlc->woke_up = true;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
		return;
	}
	n_hdlc->tbusy = true;
	n_hdlc->woke_up = false;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	/* pop the next pending transmit buffer */
	tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
	while (tbuf) {
		pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);

		/* ask for a wakeup, then hand the frame to the driver */
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		actual = tty->ops->write(tty, tbuf->buf, tbuf->count);

		/* driver rolled back the write: requeue at the head so
		 * frame ordering is preserved, retry on next wakeup */
		if (actual == -ERESTARTSYS) {
			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
			break;
		}

		/* on any other transmit error, drop the frame by
		 * pretending the driver accepted it in full */
		if (actual < 0)
			actual = tbuf->count;

		if (actual == tbuf->count) {
			pr_debug("frame %p completed\n", tbuf);

			/* recycle the completed buffer */
			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);

			/* wake up sleeping writers */
			wake_up_interruptible(&tty->write_wait);

			/* get next pending transmit buffer */
			tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
		} else {
			pr_debug("frame %p pending\n", tbuf);

			/* partial write: put the frame back at the head of
			 * the queue and wait for the driver wakeup to
			 * resend it from the start */
			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
			break;
		}
	}

	/* queue drained: no further wakeups needed */
	if (!tbuf)
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

	/* release the sender role */
	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	n_hdlc->tbusy = false;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	/* another context tried to send while we were busy: go again */
	if (n_hdlc->woke_up)
		goto check_again;
}
336
337
338
339
340
341
342
343static void n_hdlc_tty_wakeup(struct tty_struct *tty)
344{
345 struct n_hdlc *n_hdlc = tty->disc_data;
346
347 n_hdlc_send_frames(n_hdlc, tty);
348}
349
350
351
352
353
354
355
356
357
358
359
/*
 * n_hdlc_tty_receive - called by tty low level driver when receive data is
 * available
 * @tty: pointer to tty instance data
 * @data: pointer to received data
 * @flags: pointer to flags for data (unused here)
 * @count: count of received data in bytes
 *
 * Copies one received frame into an rx buffer and queues it for read().
 * Runs in the driver's receive path, hence GFP_ATOMIC allocation.
 */
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
			       const char *flags, int count)
{
	register struct n_hdlc *n_hdlc = tty->disc_data;
	register struct n_hdlc_buf *buf;

	pr_debug("%s() called count=%d\n", __func__, count);

	/* verify line is using HDLC discipline */
	if (n_hdlc->magic != HDLC_MAGIC) {
		pr_err("line not using HDLC discipline\n");
		return;
	}

	/* frames larger than the buffer size are silently dropped */
	if (count > maxframe) {
		pr_debug("rx count>maxframesize, data discarded\n");
		return;
	}

	/* get a free buffer, or allocate one on the fly while the number of
	 * queued rx frames is below the hard limit */
	buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
	if (!buf) {




		if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
			buf = kmalloc(struct_size(buf, buf, maxframe),
				      GFP_ATOMIC);
	}

	if (!buf) {
		pr_debug("no more rx buffers, data discarded\n");
		return;
	}

	/* copy the frame and queue it for readers */
	memcpy(buf->buf, data, count);
	buf->count = count;


	n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);

	/* wake up any blocked reads and perform async signalling */
	wake_up_interruptible(&tty->read_wait);
	if (tty->fasync != NULL)
		kill_fasync(&tty->fasync, SIGIO, POLL_IN);

}
409
410
411
412
413
414
415
416
417
418
419
420
/*
 * n_hdlc_tty_read - called to retrieve one frame of data (if available)
 * @tty: pointer to tty instance data
 * @file: pointer to open file object
 * @kbuf: pointer to returned data buffer
 * @nr: size of returned data buffer
 * @cookie: continuation state for partially-consumed frames
 * @offset: bytes of the current frame already delivered to the caller
 *
 * Returns the number of bytes copied, or a negative error code. A frame
 * larger than @nr is delivered over multiple calls via *@cookie/@offset.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			       __u8 *kbuf, size_t nr,
			       void **cookie, unsigned long offset)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int ret = 0;
	struct n_hdlc_buf *rbuf;
	DECLARE_WAITQUEUE(wait, current);

	/* continuation of a previous, partially-read frame? */
	rbuf = *cookie;
	if (rbuf)
		goto have_rbuf;

	add_wait_queue(&tty->read_wait, &wait);

	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			ret = -EIO;
			break;
		}
		/* hung up: return 0 (EOF) */
		if (tty_hung_up_p(file))
			break;

		/* set state before checking the queue to avoid losing a
		 * wakeup between the check and schedule() */
		set_current_state(TASK_INTERRUPTIBLE);

		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf)
			break;

		/* no data */
		if (tty_io_nonblock(tty, file)) {
			ret = -EAGAIN;
			break;
		}

		schedule();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}

	remove_wait_queue(&tty->read_wait, &wait);
	__set_current_state(TASK_RUNNING);

	if (!rbuf)
		return ret;
	*cookie = rbuf;

have_rbuf:
	/* frame fully delivered on an earlier call */
	if (offset >= rbuf->count)
		goto done_with_rbuf;

	/* zero-length read of a frame with data remaining: overflow */
	ret = -EOVERFLOW;
	if (!nr)
		goto done_with_rbuf;

	/* copy as much of the remaining frame as fits */
	ret = rbuf->count - offset;
	if (ret > nr)
		ret = nr;
	memcpy(kbuf, rbuf->buf+offset, ret);
	offset += ret;

	/* frame not fully consumed: keep the cookie for the next call */
	if (offset < rbuf->count)
		return ret;

done_with_rbuf:
	*cookie = NULL;

	/* free the buffer outright if the free pool is already full */
	if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
		kfree(rbuf);
	else
		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);

	return ret;

}
504
505
506
507
508
509
510
511
512
513
/*
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty: pointer to associated tty device instance data
 * @file: pointer to file object data
 * @data: pointer to transmit data (one frame)
 * @count: size of transmit frame in bytes
 *
 * Returns the number of bytes written (possibly truncated to maxframe),
 * or a negative error code.
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
				const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	pr_debug("%s() called count=%zd\n", __func__, count);

	/* verify line is using HDLC discipline */
	if (n_hdlc->magic != HDLC_MAGIC)
		return -EIO;

	/* frames are silently truncated to the buffer size */
	if (count > maxframe) {
		pr_debug("%s: truncating user packet from %zu to %d\n",
			 __func__, count, maxframe);
		count = maxframe;
	}

	add_wait_queue(&tty->write_wait, &wait);

	/* wait for a free transmit buffer (or fail fast in nonblock mode) */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (tbuf)
			break;

		if (tty_io_nonblock(tty, file)) {
			error = -EAGAIN;
			break;
		}
		schedule();

		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	/* error == 0 implies tbuf != NULL: the loop only exits without an
	 * error once a buffer has been obtained */
	if (!error) {
		/* Retrieve the user's buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list, tbuf);
		n_hdlc_send_frames(n_hdlc, tty);
	}

	return error;

}
571
572
573
574
575
576
577
578
579
580
/*
 * n_hdlc_tty_ioctl - process IOCTL system call for the tty device
 * @tty: pointer to tty instance data
 * @file: pointer to open file object for device
 * @cmd: IOCTL command code
 * @arg: argument for IOCTL call (cmd dependent)
 *
 * Returns command dependent result.
 */
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
			    unsigned int cmd, unsigned long arg)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int error = 0;
	int count;
	unsigned long flags;
	struct n_hdlc_buf *buf = NULL;

	pr_debug("%s() called %d\n", __func__, cmd);

	/* Verify the status of the device */
	if (n_hdlc->magic != HDLC_MAGIC)
		return -EBADF;

	switch (cmd) {
	case FIONREAD:
		/* report count of read data available in the first
		 * received frame (frame-oriented semantics) */
		spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock, flags);
		buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
					       struct n_hdlc_buf, list_item);
		if (buf)
			count = buf->count;
		else
			count = 0;
		spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock, flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TIOCOUTQ:
		/* get the pending tx byte count in the driver, plus the
		 * size of the next queued (unsent) frame, if any */
		count = tty_chars_in_buffer(tty);

		spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
		buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
					       struct n_hdlc_buf, list_item);
		if (buf)
			count += buf->count;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TCFLSH:
		switch (arg) {
		case TCIOFLUSH:
		case TCOFLUSH:
			flush_tx_queue(tty);
		}
		/* intentional: TCFLSH is also forwarded to the generic
		 * n_tty ioctl helper below */
		fallthrough;

	default:
		error = n_tty_ioctl_helper(tty, file, cmd, arg);
		break;
	}
	return error;

}
639
640
641
642
643
644
645
646
647
648
649
650static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
651 poll_table *wait)
652{
653 struct n_hdlc *n_hdlc = tty->disc_data;
654 __poll_t mask = 0;
655
656 if (n_hdlc->magic != HDLC_MAGIC)
657 return 0;
658
659
660
661
662
663 poll_wait(filp, &tty->read_wait, wait);
664 poll_wait(filp, &tty->write_wait, wait);
665
666
667 if (!list_empty(&n_hdlc->rx_buf_list.list))
668 mask |= EPOLLIN | EPOLLRDNORM;
669 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
670 mask |= EPOLLHUP;
671 if (tty_hung_up_p(filp))
672 mask |= EPOLLHUP;
673 if (!tty_is_writelocked(tty) &&
674 !list_empty(&n_hdlc->tx_free_buf_list.list))
675 mask |= EPOLLOUT | EPOLLWRNORM;
676
677 return mask;
678}
679
680static void n_hdlc_alloc_buf(struct n_hdlc_buf_list *list, unsigned int count,
681 const char *name)
682{
683 struct n_hdlc_buf *buf;
684 unsigned int i;
685
686 for (i = 0; i < count; i++) {
687 buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
688 if (!buf) {
689 pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
690 __func__, name, i);
691 return;
692 }
693 n_hdlc_buf_put(list, buf);
694 }
695}
696
697
698
699
700
701
702static struct n_hdlc *n_hdlc_alloc(void)
703{
704 struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);
705
706 if (!n_hdlc)
707 return NULL;
708
709 spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
710 spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
711 spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
712 spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
713
714 INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
715 INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
716 INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
717 INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
718
719 n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
720 n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");
721
722
723 n_hdlc->magic = HDLC_MAGIC;
724
725 return n_hdlc;
726
727}
728
729
730
731
732
733
734static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
735 struct n_hdlc_buf *buf)
736{
737 unsigned long flags;
738
739 spin_lock_irqsave(&buf_list->spinlock, flags);
740
741 list_add(&buf->list_item, &buf_list->list);
742 buf_list->count++;
743
744 spin_unlock_irqrestore(&buf_list->spinlock, flags);
745}
746
747
748
749
750
751
752static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
753 struct n_hdlc_buf *buf)
754{
755 unsigned long flags;
756
757 spin_lock_irqsave(&buf_list->spinlock, flags);
758
759 list_add_tail(&buf->list_item, &buf_list->list);
760 buf_list->count++;
761
762 spin_unlock_irqrestore(&buf_list->spinlock, flags);
763}
764
765
766
767
768
769
770
771
772
773static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
774{
775 unsigned long flags;
776 struct n_hdlc_buf *buf;
777
778 spin_lock_irqsave(&buf_list->spinlock, flags);
779
780 buf = list_first_entry_or_null(&buf_list->list,
781 struct n_hdlc_buf, list_item);
782 if (buf) {
783 list_del(&buf->list_item);
784 buf_list->count--;
785 }
786
787 spin_unlock_irqrestore(&buf_list->spinlock, flags);
788 return buf;
789}
790
/* line discipline operations for N_HDLC */
static struct tty_ldisc_ops n_hdlc_ldisc = {
	.owner		= THIS_MODULE,
	.num		= N_HDLC,
	.name		= "hdlc",
	.open		= n_hdlc_tty_open,
	.close		= n_hdlc_tty_close,
	.read		= n_hdlc_tty_read,
	.write		= n_hdlc_tty_write,
	.ioctl		= n_hdlc_tty_ioctl,
	.poll		= n_hdlc_tty_poll,
	.receive_buf	= n_hdlc_tty_receive,
	.write_wakeup	= n_hdlc_tty_wakeup,
	.flush_buffer	= flush_rx_queue,
};
805
806static int __init n_hdlc_init(void)
807{
808 int status;
809
810
811 maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);
812
813 status = tty_register_ldisc(&n_hdlc_ldisc);
814 if (!status)
815 pr_info("N_HDLC line discipline registered with maxframe=%d\n",
816 maxframe);
817 else
818 pr_err("N_HDLC: error registering line discipline: %d\n",
819 status);
820
821 return status;
822
823}
824
/* module exit point: unregister the line discipline */
static void __exit n_hdlc_exit(void)
{
	tty_unregister_ldisc(&n_hdlc_ldisc);
}
829
830module_init(n_hdlc_init);
831module_exit(n_hdlc_exit);
832
833MODULE_LICENSE("GPL");
834MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com");
835module_param(maxframe, int, 0);
836MODULE_ALIAS_LDISC(N_HDLC);
837