1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/inetdevice.h>
31#include <linux/if_arp.h>
32#include <linux/module.h>
33#include <linux/sched.h>
34#include <net/arp.h>
35
36#include <net/irda/irda.h>
37#include <net/irda/irmod.h>
38#include <net/irda/irlan_common.h>
39#include <net/irda/irlan_client.h>
40#include <net/irda/irlan_event.h>
41#include <net/irda/irlan_eth.h>
42
/* Forward declarations for the net_device callbacks defined below */
static int irlan_eth_open(struct net_device *dev);
static int irlan_eth_close(struct net_device *dev);
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static void irlan_eth_set_multicast_list( struct net_device *dev);

/*
 * net_device_ops for the IrLAN virtual Ethernet interface.
 * MTU changes and MAC address validation are delegated to the generic
 * Ethernet helpers; everything else is implemented in this file.
 */
static const struct net_device_ops irlan_eth_netdev_ops = {
	.ndo_open               = irlan_eth_open,
	.ndo_stop               = irlan_eth_close,
	.ndo_start_xmit		= irlan_eth_xmit,
	.ndo_set_rx_mode	= irlan_eth_set_multicast_list,
	.ndo_change_mtu         = eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
57
58
59
60
61
62
63
/*
 * Function irlan_eth_setup (dev)
 *
 *    net_device setup callback (passed to alloc_netdev): initialize the
 *    IrLAN instance as a standard Ethernet device, then hook up our
 *    callbacks and destructor.
 */
static void irlan_eth_setup(struct net_device *dev)
{
	/* Fill in generic Ethernet defaults first; we override below */
	ether_setup(dev);

	dev->netdev_ops		= &irlan_eth_netdev_ops;
	/* Free the netdev (and the irlan_cb priv area) on last put */
	dev->destructor		= free_netdev;

	/*
	 * Keep the device-level TX queue short: IrTTP below us already
	 * queues, and queueing at both layers just adds latency.
	 */
	dev->tx_queue_len = 4;
}
90
91
92
93
94
95
96
/*
 * Function alloc_irlandev (name)
 *
 *    Allocate a net_device for an IrLAN instance, with a struct irlan_cb
 *    as its private area and irlan_eth_setup() as the setup callback.
 *    Returns NULL on allocation failure.
 */
struct net_device *alloc_irlandev(const char *name)
{
	return alloc_netdev(sizeof(struct irlan_cb), name,
			    irlan_eth_setup);
}
102
103
104
105
106
107
108
/*
 * Function irlan_eth_open (dev)
 *
 *    Network device has been opened by user (e.g. "ifconfig up").
 *    Kicks the IrLAN client state machine and blocks until the data
 *    link is usable (or a signal arrives).
 *
 *    Returns 0 on success, -ERESTARTSYS if interrupted by a signal.
 */
static int irlan_eth_open(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__ );

	/* Don't transmit yet - the queue is woken when the data link is up */
	netif_stop_queue(dev);

	/* Start (re)connecting to the peer */
	self->disconnect_reason = 0;
	irlan_client_wakeup(self, self->saddr, self->daddr);

	/* Block so we have a hardware address before returning, which
	 * keeps DHCP clients happy.
	 * NOTE(review): the wake-up condition tests !tsap_data->connected,
	 * which reads as "wait until disconnected" - looks inverted; confirm
	 * against the open_wait wake-up sites in irlan_event.c before
	 * changing. */
	return wait_event_interruptible(self->open_wait,
					!self->tsap_data->connected);
}
127
128
129
130
131
132
133
134
135
/*
 * Function irlan_eth_close (dev)
 *
 *    Stop the IrLAN network device; usually called via "ifconfig down".
 *    Tears down the data channel and TSAPs, notifies both the client and
 *    provider state machines, and discards any queued TX frames.
 *
 *    Always returns 0.
 */
static int irlan_eth_close(struct net_device *dev)
{
	struct irlan_cb *self = netdev_priv(dev);

	IRDA_DEBUG(2, "%s()\n", __func__ );

	/* No more transmissions from this point on */
	netif_stop_queue(dev);

	irlan_close_data_channel(self);
	irlan_close_tsaps(self);

	/* Both roles must see the disconnect */
	irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
	irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);

	/* Drop any frames still waiting in the client TX queue */
	skb_queue_purge(&self->client.txq);

	self->client.tx_busy = 0;

	return 0;
}
158
159
160
161
162
163
164
165static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
166 struct net_device *dev)
167{
168 struct irlan_cb *self = netdev_priv(dev);
169 int ret;
170 unsigned int len;
171
172
173 if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
174 struct sk_buff *new_skb =
175 skb_realloc_headroom(skb, self->max_header_size);
176
177
178 dev_kfree_skb(skb);
179
180
181 if (new_skb == NULL)
182 return NETDEV_TX_OK;
183
184
185 skb = new_skb;
186 }
187
188 dev->trans_start = jiffies;
189
190 len = skb->len;
191
192 if (self->use_udata)
193 ret = irttp_udata_request(self->tsap_data, skb);
194 else
195 ret = irttp_data_request(self->tsap_data, skb);
196
197 if (ret < 0) {
198
199
200
201
202
203
204
205
206
207
208
209 dev->stats.tx_dropped++;
210 } else {
211 dev->stats.tx_packets++;
212 dev->stats.tx_bytes += len;
213 }
214
215 return NETDEV_TX_OK;
216}
217
218
219
220
221
222
223
/*
 * Function irlan_eth_receive (instance, sap, skb)
 *
 *    IrTTP data-indication callback: a frame has arrived from the peer.
 *    Validates it, strips the Ethernet header and passes it up the stack.
 *
 *    Always returns 0 (the skb is consumed or freed on every path).
 */
int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
	struct irlan_cb *self = instance;
	struct net_device *dev = self->dev;

	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return 0;
	}
	if (skb->len < ETH_HLEN) {
		/* Frame too short to even hold an Ethernet header */
		IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
			   __func__, skb->len);
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Set protocol and pull the Ethernet header off the data.
	 * Note: rx_bytes below is therefore counted WITHOUT the header,
	 * since eth_type_trans() has already advanced skb->data. */
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);   /* Eat it! */

	return 0;
}
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
271{
272 struct irlan_cb *self;
273 struct net_device *dev;
274
275 self = instance;
276
277 IRDA_ASSERT(self != NULL, return;);
278 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
279
280 dev = self->dev;
281
282 IRDA_ASSERT(dev != NULL, return;);
283
284 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__,
285 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
286 netif_running(dev));
287
288 switch (flow) {
289 case FLOW_STOP:
290
291 netif_stop_queue(dev);
292 break;
293 case FLOW_START:
294 default:
295
296
297 netif_wake_queue(dev);
298 break;
299 }
300}
301
302
303
304
305
306
307
308#define HW_MAX_ADDRS 4
309static void irlan_eth_set_multicast_list(struct net_device *dev)
310{
311 struct irlan_cb *self = netdev_priv(dev);
312
313 IRDA_DEBUG(2, "%s()\n", __func__ );
314
315
316 if (self->client.state != IRLAN_DATA) {
317 IRDA_DEBUG(1, "%s(), delaying!\n", __func__ );
318 return;
319 }
320
321 if (dev->flags & IFF_PROMISC) {
322
323 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
324 }
325 else if ((dev->flags & IFF_ALLMULTI) ||
326 netdev_mc_count(dev) > HW_MAX_ADDRS) {
327
328 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
329
330
331 irlan_set_multicast_filter(self, TRUE);
332 }
333 else if (!netdev_mc_empty(dev)) {
334 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
335
336
337
338 irlan_set_multicast_filter(self, TRUE);
339 }
340 else {
341 IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ );
342 irlan_set_multicast_filter(self, FALSE);
343 }
344
345 if (dev->flags & IFF_BROADCAST)
346 irlan_set_broadcast_filter(self, TRUE);
347 else
348 irlan_set_broadcast_filter(self, FALSE);
349}
350