/*
 * wrapper.c - IrDA SIR async wrapper layer
 *
 * Wraps and unwraps IrDA frames for transmission over an async serial
 * port (SIR speeds). Frames are delimited by BOF/EOF characters,
 * protected by a 16-bit FCS, and byte-stuffed so that the special
 * characters never appear inside the payload.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>
#include <net/irda/irlap.h>
#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>

/************************** FRAME WRAPPING **************************/
/*
 * Wrap and stuff SIR frames
 *
 * Note: at FIR and MIR speeds, HDLC framing is usually handled by the
 * controller hardware, so we only come here at SIR speeds.
 */

/*
 * Function stuff_byte (byte, buf)
 *
 *    Byte-stuff one single byte and put the result in the buffer
 *    pointed to by buf. The buffer must always have room for two
 *    output bytes, since a stuffed byte expands into a two-byte
 *    escape sequence.
 *
 * This is in a tight loop on the Tx path, hence the inline.
 */
static inline int stuff_byte(__u8 byte, __u8 *buf)
{
	switch (byte) {
	case BOF:
	case EOF:
	case CE:
		/* Insert transparently coded */
		buf[0] = CE;			/* Send link escape */
		buf[1] = byte ^ IRDA_TRANS;	/* Complement transparency bit */
		return 2;
	default:
		/* Non-special value, no transparency required */
		buf[0] = byte;
		return 1;
	}
}
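
/*
 * Note that in the worst case every payload byte needs stuffing, so a
 * caller sizing a tx buffer for async_wrap_skb() below should allow
 * roughly xbofs + 1 (BOF) + 2*len (payload) + 4 (stuffed FCS) + 1 (EOF)
 * bytes of output.
 */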

/*
 * Function async_wrap_skb (skb, tx_buff, buffsize)
 *
 *    Make a new buffer with wrapping and stuffing, checking that we
 *    don't get a tx-buffer overflow. Returns the number of bytes
 *    written to tx_buff.
 */
int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
{
	struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
	int xbofs;
	int i;
	int n;
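	/*
	 * Keep the FCS in a union so the 16-bit value can be sent out
	 * byte by byte, in the right order, below.
	 */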
	union {
		__u16 value;
		__u8 bytes[2];
	} fcs;

	/* Initialize variables */
	fcs.value = INIT_FCS;
	n = 0;

	/*
	 * Send XBOFs for the required min turn time and for the
	 * negotiated link speed.
	 */
	if (cb->magic != LAP_MAGIC) {
		/*
		 * This will happen for all frames sent from user-space.
		 * Nothing to worry about, but we set the default number
		 * of XBOFs.
		 */
		IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__);
		xbofs = 10;
	} else
		xbofs = cb->xbofs + cb->xbofs_delay;

	IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs);

	/* Check that we never use more than 115 + 48 XBOFs */
	if (xbofs > 163) {
		IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__,
			   xbofs);
		xbofs = 163;
	}

	memset(tx_buff + n, XBOF, xbofs);
	n += xbofs;

	/* Start of packet character BOF */
	tx_buff[n++] = BOF;

	/* Insert frame and calc CRC */
	for (i = 0; i < skb->len; i++) {
		/*
		 * Check for the possibility of tx buffer overflow. We use
		 * buffsize-5 since the maximum number of bytes that can be
		 * transmitted after this point is 5 (a fully stuffed FCS
		 * plus the EOF character).
		 */
		if (n >= (buffsize - 5)) {
			IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n",
				   __func__, n);
			return n;
		}

		n += stuff_byte(skb->data[i], tx_buff + n);
		fcs.value = irda_fcs(fcs.value, skb->data[i]);
	}

	/* Insert CRC in little-endian format (LSB first) */
	fcs.value = ~fcs.value;
#ifdef __LITTLE_ENDIAN
	n += stuff_byte(fcs.bytes[0], tx_buff + n);
	n += stuff_byte(fcs.bytes[1], tx_buff + n);
#else /* __BIG_ENDIAN */
	n += stuff_byte(fcs.bytes[1], tx_buff + n);
	n += stuff_byte(fcs.bytes[0], tx_buff + n);
#endif
	tx_buff[n++] = EOF;

	return n;
}
EXPORT_SYMBOL(async_wrap_skb);
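
/*
 * A SIR driver would typically wrap each frame just before writing it
 * to the UART. A minimal sketch (the "self" driver structure and its
 * fields are hypothetical):
 *
 *	int actual = async_wrap_skb(skb, self->tx_buff.data,
 *				    self->tx_buff.truesize);
 *	self->tx_buff.len = actual;
 */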

/************************* FRAME UNWRAPPING *************************/
/*
 * Unwrap and unstuff SIR frames
 *
 * Note: at FIR and MIR speeds, HDLC framing is usually handled by the
 * controller hardware, so this code path is only used at SIR speeds.
 *
 * The receiver is a small state machine (OUTSIDE_FRAME, BEGIN_FRAME,
 * INSIDE_FRAME, LINK_ESCAPE) driven one byte at a time by
 * async_unwrap_char() below.
 *
 * The CRC can either be computed "inline", as the bytes are received,
 * or "postponed" until the End-Of-Frame character arrives; the choice
 * is made at compile time via POSTPONE_RX_CRC.
 */

/*
 * Function async_bump (dev, stats, rx_buff)
 *
 *    Got a frame, make a copy of it, and pass it up the stack!
 */
static inline void
async_bump(struct net_device *dev,
	   struct net_device_stats *stats,
	   iobuff_t *rx_buff)
{
	struct sk_buff *newskb;
	struct sk_buff *dataskb;
	int docopy;

	/*
	 * Check if we need to copy the data to a new skb or not. If the
	 * driver doesn't use ZeroCopy Rx, we have to do it. With ZeroCopy
	 * Rx, rx_buff already points to a valid skb, but if the frame is
	 * small it is cheaper to copy it anyway (Rx-copy-break).
	 */
	docopy = ((rx_buff->skb == NULL) ||
		  (rx_buff->len < IRDA_RX_COPY_THRESHOLD));

	/* Allocate a new skb */
	newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
	if (!newskb) {
		stats->rx_dropped++;
		/*
		 * We could deliver the current skb when doing ZeroCopy Rx,
		 * but this would stall the Rx path. Better drop the packet.
		 */
		return;
	}

	/*
	 * Align the IP header to 20 bytes (i.e. increase skb->data).
	 * This is only useful with IrLAN, as PPP has a variable header
	 * size (1 or 2 bytes).
	 */
	skb_reserve(newskb, 1);

	if (docopy) {
		/* Copy data without CRC (length already checked) */
		skb_copy_to_linear_data(newskb, rx_buff->data,
					rx_buff->len - 2);
		/* Deliver this skb */
		dataskb = newskb;
	} else {
		/* We are using ZeroCopy. Deliver the old skb... */
		dataskb = rx_buff->skb;
		/* ...and hook the new skb to rx_buff */
		rx_buff->skb = newskb;
		rx_buff->head = newskb->data;	/* NOT newskb->head */
	}

	/* Set proper length on skb (without CRC) */
	skb_put(dataskb, rx_buff->len - 2);

	/* Feed it to the IrLAP layer */
	dataskb->dev = dev;
	skb_reset_mac_header(dataskb);
	dataskb->protocol = htons(ETH_P_IRDA);

	netif_rx(dataskb);

	stats->rx_packets++;
	stats->rx_bytes += rx_buff->len;

	/* Clean up rx_buff, ready for the next frame */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}

/*
 * Function async_unwrap_bof (dev, stats, rx_buff, byte)
 *
 *    Handle a Beginning-Of-Frame character received within a frame.
 */
static inline void
async_unwrap_bof(struct net_device *dev,
		 struct net_device_stats *stats,
		 iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
	case LINK_ESCAPE:
	case INSIDE_FRAME:
		/* Not supposed to happen, the previous frame is not
		 * finished - discard it. */
		IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n",
			   __func__);
		stats->rx_errors++;
		stats->rx_missed_errors++;
		irda_device_set_media_busy(dev, TRUE);
		break;

	case OUTSIDE_FRAME:
	case BEGIN_FRAME:
	default:
		/* We may receive multiple BOFs at the start of a frame */
		break;
	}

	/* Now receiving frame */
	rx_buff->state = BEGIN_FRAME;
	rx_buff->in_frame = TRUE;

	/* Time to initialize the receive buffer */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
	rx_buff->fcs = INIT_FCS;
}

/*
 * Function async_unwrap_eof (dev, stats, rx_buff, byte)
 *
 *    Handle an End-Of-Frame character received within a frame.
 */
static inline void
async_unwrap_eof(struct net_device *dev,
		 struct net_device_stats *stats,
		 iobuff_t *rx_buff, __u8 byte)
{
#ifdef POSTPONE_RX_CRC
	int i;
#endif

	switch (rx_buff->state) {
	case OUTSIDE_FRAME:
		/* Probably missed the BOF */
		stats->rx_errors++;
		stats->rx_missed_errors++;
		irda_device_set_media_busy(dev, TRUE);
		break;

	case BEGIN_FRAME:
	case LINK_ESCAPE:
	case INSIDE_FRAME:
	default:
		/* Note: in the BEGIN_FRAME and LINK_ESCAPE cases, the
		 * FCS will most likely not match and generate an error,
		 * as expected. */
		rx_buff->state = OUTSIDE_FRAME;
		rx_buff->in_frame = FALSE;

#ifdef POSTPONE_RX_CRC
		/* If we haven't done the CRC as we received the bytes,
		 * we must do it now... */
		for (i = 0; i < rx_buff->len; i++)
			rx_buff->fcs = irda_fcs(rx_buff->fcs,
						rx_buff->data[i]);
#endif

		/* Test FCS and deliver the frame if it is good */
		if (rx_buff->fcs == GOOD_FCS) {
			/* Deliver frame */
			async_bump(dev, stats, rx_buff);
			break;
		} else {
			/* Wrong CRC, discard frame! */
			irda_device_set_media_busy(dev, TRUE);

			IRDA_DEBUG(1, "%s(), crc error\n", __func__);
			stats->rx_errors++;
			stats->rx_crc_errors++;
		}
		break;
	}
}

/*
 * Function async_unwrap_ce (dev, stats, rx_buff, byte)
 *
 *    Handle a Character Escape character received within a frame.
 */
static inline void
async_unwrap_ce(struct net_device *dev,
		struct net_device_stats *stats,
		iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
	case OUTSIDE_FRAME:
		/* Activate carrier sense */
		irda_device_set_media_busy(dev, TRUE);
		break;

	case LINK_ESCAPE:
		IRDA_WARNING("%s: state not defined\n", __func__);
		break;

	case BEGIN_FRAME:
	case INSIDE_FRAME:
	default:
		/* A stuffed byte is coming next */
		rx_buff->state = LINK_ESCAPE;
		break;
	}
}

/*
 * Function async_unwrap_other (dev, stats, rx_buff, byte)
 *
 *    Handle any non-special character received within a frame.
 */
static inline void
async_unwrap_other(struct net_device *dev,
		   struct net_device_stats *stats,
		   iobuff_t *rx_buff, __u8 byte)
{
	switch (rx_buff->state) {
		/* This is on the critical path, cases are ordered by
		 * probability (most frequent first) */
	case INSIDE_FRAME:
		/* Must be the next byte of the frame */
		if (rx_buff->len < rx_buff->truesize) {
			rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
			rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
		} else {
			IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
				   __func__);
			rx_buff->state = OUTSIDE_FRAME;
		}
		break;

	case LINK_ESCAPE:
		/*
		 * Stuffed char: complement the transparency bit of the
		 * byte following CE to recover the original value.
		 */
		byte ^= IRDA_TRANS;
		if (rx_buff->len < rx_buff->truesize) {
			rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
			rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
			rx_buff->state = INSIDE_FRAME;
		} else {
			IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
				   __func__);
			rx_buff->state = OUTSIDE_FRAME;
		}
		break;

	case OUTSIDE_FRAME:
		/* Activate carrier sense */
		if (byte != XBOF)
			irda_device_set_media_busy(dev, TRUE);
		break;

	case BEGIN_FRAME:
	default:
		rx_buff->data[rx_buff->len++] = byte;
#ifndef POSTPONE_RX_CRC
		rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
		rx_buff->state = INSIDE_FRAME;
		break;
	}
}

/*
 * Function async_unwrap_char (dev, stats, rx_buff, byte)
 *
 *    Parse and de-stuff a frame received from the IrDA port. This is
 *    the main entry point for SIR drivers: feed it every received byte.
 */
void async_unwrap_char(struct net_device *dev,
		       struct net_device_stats *stats,
		       iobuff_t *rx_buff, __u8 byte)
{
	switch (byte) {
	case CE:
		async_unwrap_ce(dev, stats, rx_buff, byte);
		break;
	case BOF:
		async_unwrap_bof(dev, stats, rx_buff, byte);
		break;
	case EOF:
		async_unwrap_eof(dev, stats, rx_buff, byte);
		break;
	default:
		async_unwrap_other(dev, stats, rx_buff, byte);
		break;
	}
}
EXPORT_SYMBOL(async_unwrap_char);
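
/*
 * A minimal sketch of the Rx side of a SIR driver (the "self" driver
 * structure and its fields are hypothetical): feed every byte received
 * from the UART straight into async_unwrap_char(), which runs the
 * state machine above and delivers complete frames via netif_rx().
 *
 *	static void demo_sir_receive(struct demo_sir_port *self,
 *				     const __u8 *buf, int count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++)
 *			async_unwrap_char(self->netdev, &self->stats,
 *					  &self->rx_buff, buf[i]);
 *	}
 */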