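/*
 * islpci_eth.c: Ethernet data path (transmit, receive and timeout
 * handling) for the Prism54 ISL38xx PCI wireless driver.
 */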
#include <linux/module.h>
#include <linux/gfp.h>

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_eth.h"
#include "islpci_mgt.h"
#include "oid_mgt.h"
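
/*
 * Reclaim completed transmit frames: walk the TX data low queue from
 * the driver's free pointer up to the device's current fragment
 * pointer, unmapping the DMA buffer and freeing the sk_buff of every
 * completed frame.
 */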
void
islpci_eth_cleanup_transmit(islpci_private *priv,
			    isl38xx_control_block *control_block)
{
	struct sk_buff *skb;
	u32 index;

	/* compare the control block read pointer with the free pointer */
	while (priv->free_data_tx !=
	       le32_to_cpu(control_block->
			   device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
		/* read the index of the first fragment to be freed */
		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;

		/* check for holes in the arrays caused by multi-fragment
		 * frames, searching for the last fragment of a frame */
		if (priv->pci_map_tx_address[index]) {
			/* entry is the last fragment of a frame:
			 * free the skb structure and unmap pci memory */
			skb = priv->data_low_tx[index];

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "cleanup skb %p skb->data %p skb->len %u truesize %u\n",
			      skb, skb->data, skb->len, skb->truesize);
#endif

			pci_unmap_single(priv->pdev,
					 priv->pci_map_tx_address[index],
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			skb = NULL;
		}

		priv->free_data_tx++;
	}
}
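
/*
 * ndo_start_xmit handler: align the frame (and insert the WDS address
 * field when init_wds is set), map it for DMA, fill in the next TX
 * fragment descriptor and announce it to the device.
 */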
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments left */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
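
	/* Check alignment and WDS frame formatting.  The start of the
	 * packet should be aligned on a 4-byte boundary; when WDS is in
	 * use, an additional 6-byte address field is inserted in front
	 * of the frame.  Note that wds_mac is never initialized in this
	 * function; the WDS branches are only taken when init_wds is set. */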
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on a 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* WDS requires an additional address field
				 * of 6 bytes in front of the frame */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			/* the current skb cannot be re-used, allocate a
			 * properly sized and aligned replacement */
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* align the buffer on a 4-byte boundary */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}

	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, pci_map_address)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}

	/* store the skb and its mapping for freeing on tx completion */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	priv->data_low_tx[index] = skb;

	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame with a new curr_frag value */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

 drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
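
/*
 * Convert a monitor-mode frame for delivery to user space.  The device
 * reports full 802.11 packets with a 20-byte rfmon header prepended and
 * without the FCS; a flag bit marks corrupted frames.  When the device
 * type is ARPHRD_IEEE80211_PRISM, the rfmon header is translated into
 * an AVS capture header.
 */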
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	if (hdr->flags & 0x01)
		/* This one is bad. Drop it! */
		return -1;
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;
		/* extract the relevant data from the rfmon header */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			/* insufficient headroom: copy the skb with extra
			 * space reserved for the AVS header */
			struct sk_buff *newskb = skb_copy_expand(*skb,
								 sizeof (struct
									 avs_80211_1_header),
								 0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;
		}

		/* make room for the new header and fill it in */
		avs =
		    (struct avs_80211_1_header *) skb_push(*skb,
							   sizeof (struct
								   avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/* OFDM */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);	/* units of 100 kbit/s */
		avs->antenna = cpu_to_be32(0);	/* unknown */
		avs->priority = cpu_to_be32(0);	/* unknown */
		avs->ssi_type = cpu_to_be32(3);	/* raw RSSI */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);
		avs->preamble = cpu_to_be32(0);	/* unknown */
		avs->encoding = cpu_to_be32(0);	/* unknown */
	} else
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}
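
/*
 * Handle one received frame from the RX data low queue: unmap its DMA
 * buffer, fix up alignment and WDS bytes, hand the skb to the network
 * stack, then refill the queue with freshly allocated, DMA-mapped
 * sk_buffs.
 */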
int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif

	/* the device has written an Ethernet frame in the data area
	 * of the sk_buff without updating the structure; do it now */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n",
	      control_block->rx_data_low[index].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* delete the streaming DMA mapping before processing the skb */
	pci_unmap_single(priv->pdev,
			 priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

	/* update the skb structure and align the buffer */
	skb_put(skb, size);
	if (offset) {
		/* shift the address of the data in the skb */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}

#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* check whether WDS is enabled and the frame carries the
	 * additional WDS address field */
	if (init_wds) {
		/* WDS enabled: strip the 6-byte WDS address field */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* take care of monitor mode and spy monitoring */
	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* The packet has a rx_annex. Read it for spy
			 * monitoring, then remove it, while keeping the
			 * two leading MAC addresses. */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;
			/* The noise value can be a bit outdated if nobody
			 * is reading the wireless statistics */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;

			/* update the spy records */
			wireless_spy_update(ndev, annex->addr2, &wstats);

			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* increment the read index for the rx data low queue */
	priv->free_data_rx++;
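
	/* add one or more sk_buffs to the receive queue; the comma
	 * expression re-reads the driver pointer on every iteration */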
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frames storage,
		 * including any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff structure */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
			break;
		}
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);

		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* set the streaming DMA mapping for proper PCI bus operation */
		priv->pci_map_rx_address[index] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(priv->pdev,
					  priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device-accessible memory */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the skb structure before aborting */
			dev_kfree_skb_irq(skb);
			skb = NULL;
			break;
		}

		/* the fragment address must be written before the driver
		 * pointer that announces it is incremented */
		control_block->rx_data_low[index].address =
		    cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		wmb();

		/* increment the driver read pointer */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* trigger the device */
	islpci_trigger(priv);

	return 0;
}
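
/*
 * Worker for priv->reset_task: reset the device, clear the pending
 * flag and restart the transmit queue.
 */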
void
islpci_do_reset_and_wake(struct work_struct *work)
{
	islpci_private *priv = container_of(work, islpci_private, reset_task);

	islpci_reset(priv, 1);
	priv->reset_task_pending = 0;
	smp_wmb();
	netif_wake_queue(priv->ndev);
}
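
/*
 * ndo_tx_timeout handler: count the error and schedule a device reset
 * unless one is already pending.
 */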
void
islpci_eth_tx_timeout(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);

	/* increment the transmit error counter */
	ndev->stats.tx_errors++;

	if (!priv->reset_task_pending) {
		printk(KERN_WARNING
		       "%s: tx_timeout, scheduling reset\n", ndev->name);
		netif_stop_queue(ndev);
		priv->reset_task_pending = 1;
		schedule_work(&priv->reset_task);
	} else {
		printk(KERN_WARNING
		       "%s: tx_timeout, waiting for reset\n", ndev->name);
	}
}