#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
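
/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet on
 * @dst_addr: the neighbor MAC address the packet is sent to
 *
 * Prepend an ethernet header (source: the address of @hard_iface,
 * destination: @dst_addr, protocol: ETH_P_BATMAN) and hand the skb over to
 * the network device. The skb is consumed in all cases.
 *
 * Return: the result of dev_queue_xmit(), or NET_XMIT_DROP if the packet
 * could not be prepared or the interface is not usable.
 */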
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* store the packet for later decoding by network coding, if enabled */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
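
/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 *
 * Return: see batadv_send_skb_packet().
 */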
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}
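
/**
 * batadv_send_unicast_skb - send a packet to a neighbor over its interface
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Also updates the B.A.T.M.A.N. V per-neighbor unicast transmit timestamp
 * when that algorithm is compiled in.
 *
 * Return: see batadv_send_skb_packet().
 */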
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if ((hardif_neigh) && (ret != NET_XMIT_DROP))
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}
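
/**
 * batadv_send_skb_to_orig - send a packet to its originator's best router
 * @skb: the packet to send
 * @orig_node: the final destination of the packet
 * @recv_if: the interface the packet was received on (NULL if it originates
 *  locally)
 *
 * Look up the best next hop towards @orig_node, fragment the packet if it
 * exceeds the outgoing interface MTU and fragmentation is enabled, and try
 * to network-code forwarded packets.
 *
 * Return: NET_XMIT_DROP if no router was found or sending failed,
 * NET_XMIT_POLICED if the skb was consumed by network coding,
 * NET_XMIT_SUCCESS otherwise.
 */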
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_unicast_skb(skb, neigh_node);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}
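
/**
 * batadv_send_skb_push_fill_unicast - extend the skb and fill a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @hdr_size: amount of head room to push for the header
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension failed, true otherwise.
 */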
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
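
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated, true otherwise.
 */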
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
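
/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated, true otherwise.
 */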
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet.
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}
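
/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header and
 * send it towards @orig_node. Consumes the skb and a reference to
 * @orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */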
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet and
	 * will try to reroute it because the ttvn contained in the header is
	 * less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
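
/**
 * batadv_send_skb_via_tt_generic - send an skb via a node in the translation
 *  table
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the destination node in the translation table, then wrap and send
 * the skb via batadv_send_skb_unicast().
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */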
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if a destination hint was given, resolve it instead of the
	 * payload's destination
	 */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
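
/**
 * batadv_send_skb_via_gw - send an skb via the currently selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a unicast header addressed to the selected gateway
 * node and send it via batadv_send_skb_unicast().
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */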
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
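
/**
 * batadv_schedule_bat_ogm - schedule an originator message for this interface
 * @hard_iface: the interface to schedule the OGM on
 */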
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
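
/**
 * batadv_forw_packet_free - free a queued forwarding packet and its references
 * @forw_packet: the packet to free
 */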
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}
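
/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet for (re)sending
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the prepared forwarding packet
 * @send_time: delay in jiffies before the packet is handed to the workqueue
 */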
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
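
/**
 * batadv_add_bcast_packet_to_list - queue a broadcast packet for sending
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * The skb is copied and the copy is queued; the caller keeps ownership of
 * the original skb. The broadcast is later (re)sent on all active interfaces
 * by batadv_send_outstanding_bcast_packet().
 *
 * Return: NETDEV_TX_OK on success, NETDEV_TX_BUSY if the broadcast queue is
 * full or the packet could not be queued.
 */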
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}
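
/**
 * batadv_send_outstanding_bcast_packet - (re)send a queued broadcast packet
 * @work: the work item embedded in the forwarding packet
 */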
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
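
/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: the work item embedded in the forwarding packet
 */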
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of
	 * the primary interface should only be rescheduled once per period,
	 * but this function will be called for the forw_packet instances of
	 * all secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
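
/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: if not NULL, only packets belonging to this interface are
 *  purged
 */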
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we want to purge only packets which belong to the given
		 * interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->bcast_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we want to purge only packets which belong to the given
		 * interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->batman_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}