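/*
 * islpci_eth.c: Ethernet-level data path for the Prism ISL38xx PCI driver:
 * transmit and receive over the DMA fragment rings shared with the device
 * through the isl38xx control block, plus monitor-mode framing and the
 * TX watchdog handling.
 */
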
#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"
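
/*
 * Ring bookkeeping, as used throughout this file: the shared control block
 * carries driver_curr_frag[] (fragments produced by the driver) and
 * device_curr_frag[] (fragments consumed by the device) as free-running
 * little-endian counters; a counter taken modulo the queue size
 * (ISL38XX_CB_TX_QSIZE / ISL38XX_CB_RX_QSIZE) selects the ring slot.
 */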

void
islpci_eth_cleanup_transmit(islpci_private *priv,
			    isl38xx_control_block *control_block)
{
	struct sk_buff *skb;
	u32 index;

	/* compare the control block read pointer with the free pointer */
	while (priv->free_data_tx !=
	       le32_to_cpu(control_block->
			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
		/* read the index of the first fragment to be freed */
		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

		/* check for holes in the arrays caused by multi-fragment
		 * frames: only the last fragment of a frame carries a
		 * mapping */
		if (priv->pci_map_tx_address[index]) {
			/* entry is the last fragment of a frame:
			 * free the skb structure and unmap pci memory */
			skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n",
			      skb, skb->data, skb->len, skb->truesize);
#endif

			pci_unmap_single(priv->pdev,
					 priv->pci_map_tx_address[index],
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		}

		priv->free_data_tx++;
	}
}
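
/*
 * Illustrative sketch of the counter arithmetic (not driver code): with
 * driver_curr_frag = 1027 and free_data_tx = 1024, 1027 - 1024 = 3
 * fragments are still owned by the device, and the next slot to reclaim
 * is 1024 % ISL38XX_CB_TX_QSIZE.  The unsigned subtraction stays correct
 * across u32 wraparound because the queue checks below keep the distance
 * between the counters far below 2^32.
 */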

netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has room for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device so it drains the queue */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
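
	/*
	 * Note on the check above: curr_frag - priv->free_data_tx is the
	 * number of fragments queued but not yet reclaimed, and the
	 * unsigned subtraction stays valid even after either free-running
	 * counter wraps.
	 */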
	/* check alignment and WDS frame formatting: the start of a packet
	 * should be aligned on a 4-byte boundary; if WDS is enabled an
	 * additional 6-byte address field is prepended */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used in place */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on a 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* WDS requires an additional 6-byte address field */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				/* skb->len already includes the 6 bytes just
				 * added, so move only the original payload */
				memmove(skb->data + 6, src, skb->len - 6);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			/* the skb is cloned or lacks tailroom: re-align by
			 * copying into a freshly allocated skb */
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* align newskb->data on a 4-byte boundary */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
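
	/*
	 * Caveat preserved from the original WDS logic: wds_mac is never
	 * filled in by this function, so the prepended 6-byte address is
	 * only meaningful if WDS framing is completed elsewhere; init_wds
	 * is expected to be 0 in normal operation.
	 */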

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(priv->pdev, pci_map_address))) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
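
	/*
	 * pci_dma_mapping_error() is the defined way to test a
	 * pci_map_single() result; comparing the dma_addr_t against 0 is
	 * not reliable, since 0 can be a valid bus address on some
	 * platforms.
	 */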

	/* get the index of the next free slot in the transmit fragment ring */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;

	/* store the skb address for future freeing */
	priv->data_low_tx[index] = skb;

	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* the fragment address in the control block must be written
	 * before announcing the frame buffer to the device */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}
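
	/*
	 * Worked example with illustrative values: if ISL38XX_CB_TX_QSIZE
	 * were 32 and ISL38XX_MIN_QTHRESHOLD were 4, the queue would be
	 * stopped once 29 or more fragments are outstanding, leaving
	 * headroom for a frame already in flight from the stack.
	 */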

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device to fetch the frame */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

 drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

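/*
 * Monitor-mode receive: rewrite the device's rfmon header into the format
 * implied by the configured ARP type.  For ARPHRD_IEEE80211_PRISM an AVS
 * capture header (P80211CAPTURE_VERSION) is prepended; otherwise the rfmon
 * header is simply stripped.  Returns 0 to deliver the frame, -1 to drop it.
 */
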
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	/* the card reports full 802.11 packets with an rfmon header
	 * prepended and without the FCS; a flag bit tells us whether
	 * the packet is corrupted */
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	if (hdr->flags & 0x01)
		/* this one is bad, drop it */
		return -1;
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;
		/* extract the relevant data from the rfmon header */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			/* copy the data into a new skb with enough headroom */
			struct sk_buff *newskb =
			    skb_copy_expand(*skb,
					    sizeof (struct avs_80211_1_header),
					    0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;
		}

		/* make room for the new header and fill it */
		avs = (struct avs_80211_1_header *)
		    skb_push(*skb, sizeof (struct avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/* OFDM: 6 for (g), 8 for (a) */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);	/* 500 kbps units * 5 = 100 kbps units */
		avs->antenna = cpu_to_be32(0);	/* unknown */
		avs->priority = cpu_to_be32(0);	/* unknown */
		avs->ssi_type = cpu_to_be32(3);	/* 2: dBm, 3: raw RSSI */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);
		avs->preamble = cpu_to_be32(0);	/* unknown */
		avs->encoding = cpu_to_be32(0);	/* unknown */
	} else
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}

int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame into the data area of
	 * the sk_buff without updating the structure; do it now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n",
	      control_block->rx_data_low[index].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	pci_unmap_single(priv->pdev,
			 priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* the device wrote at a 2-byte offset; realign the payload */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
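	/*
	 * The pull/put pair above moves no bytes; it only shifts skb->data
	 * and skb->tail by 2 so that skb->data matches the address the
	 * device actually wrote to, using the 2 bytes of slack reserved
	 * when the buffer was posted.
	 */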
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled */
	if (init_wds) {
		/* WDS enabled: strip the additional address from the frame */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* take care of monitor mode and spy monitoring */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* the packet has a rx_annex; read it for spy
			 * monitoring, then remove it */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* the noise value can be a bit outdated if nobody
			 * is reading the wireless stats */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;

			/* update spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			/* copy the MAC addresses to just ahead of the
			 * payload, then pull the annex off the front */
			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;

	/* add one or more sk_buffs to the receive queue while the driver
	 * index stays within one queue size of the device index */
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frames storage;
		 * the 2 extra bytes leave room for the 2-byte alignment
		 * offset the device may apply */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff structure */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);

		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(priv->pdev,
						   priv->pci_map_rx_address[index]))) {
			/* error mapping the buffer to device-accessible
			 * memory; clear the stored handle so teardown code
			 * does not try to unmap it, and free the skb */
			priv->pci_map_rx_address[index] = 0;
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			dev_kfree_skb_irq(skb);
			skb = NULL;
			break;
		}

		/* hand the fragment to the device: the address must be
		 * visible before the driver index is incremented */
		control_block->rx_data_low[index].address =
		    cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}
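
	/*
	 * The wmb() in the loop above orders the descriptor address write
	 * before the driver_curr_frag increment that hands the slot to the
	 * device; le32_add_cpu() performs the read-modify-write on the
	 * little-endian counter in place.
	 */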

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}

void
islpci_do_reset_and_wake(struct work_struct *work)
{
	islpci_private *priv = container_of(work, islpci_private, reset_task);

	islpci_reset(priv, 1);
	priv->reset_task_pending = 0;
	smp_wmb();
	netif_wake_queue(priv->ndev);
}
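
/*
 * The smp_wmb() in islpci_do_reset_and_wake() orders the clearing of
 * reset_task_pending before the queue is woken, so a transmit timeout
 * observed after the wake also observes the flag cleared;
 * islpci_eth_tx_timeout() below reads the flag without taking the lock.
 */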

void
islpci_eth_tx_timeout(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);

	/* increment the transmit error counter */
	ndev->stats.tx_errors++;

	if (!priv->reset_task_pending) {
		printk(KERN_WARNING
		       "%s: tx_timeout, scheduling reset\n", ndev->name);
		netif_stop_queue(ndev);
		priv->reset_task_pending = 1;
		schedule_work(&priv->reset_task);
	} else {
		printk(KERN_WARNING
		       "%s: tx_timeout, waiting for reset\n", ndev->name);
	}
}