/*
 * net/tipc/msg.c: TIPC message header routines
 */
#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

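	/* Fast-clone allocation: TIPC frequently clones buffers, e.g. for
	 * retransmission, so pre-allocate the companion clone here.
	 */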
	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
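	/* Short headers carry no separate origin/destination node fields */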
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

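	/* Allocate one contiguous buffer covering both header and data */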
	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}

/**
 * tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @headbuf: the first buffer in the list
 * @buf: the buffer to be appended and replaced
 *
 * If the buffer is successfully appended, replace @buf with the head of the
 * fragment chain, otherwise *buf is set to NULL.
 * Return: 1 if the buffer chain is complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

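	/* The fragment's position (first/inner/last) is carried in the
	 * message type field of the fragment header.
	 */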
	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

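	/* Coalesce into the head buffer if possible, otherwise chain the
	 * fragment onto the head's frag_list.
	 */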
	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}

/**
 * tipc_msg_append(): Append data to the tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

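	/* Fill the queue's tail buffer first; allocate new ones only when
	 * the current tail has reached the maximum message size.
	 */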
	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the
 * message buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
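		/* Every fragment but the last carries a full dmax payload;
		 * the last one carries the remainder.
		 */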
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;

		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);

		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;

		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = BUF_ALIGN(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

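	/* Append the message at the next 4-byte aligned offset in the bundle */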
	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is potential for bundling this time
 * or later; in the case a bundling has been done this time, the skb is
 * consumed (the skb pointer = NULL).
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok, but the last/target buffer can be empty? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? If so, check if the message can fit in */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted;
 *       returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

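	/* The inner message header starts at offset *pos in the bundle's
	 * data area.
	 */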
	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += BUF_ALIGN(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+, - sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
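	/* Reroute at most once, to avoid ping-ponging between nodes */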
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted - insert buffer into sorted list
 * @list: list to be appended to
 * @seqno: sequence number of buffer
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
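	/* A buffer with this sequence number already exists: drop duplicate */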
	kfree_skb(skb);
	return false;
}

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}