1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/module.h>
20
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/if_arp.h>
26#include <asm/byteorder.h>
27
28#include "prismcompat.h"
29#include "isl_38xx.h"
30#include "islpci_eth.h"
31#include "islpci_mgt.h"
32#include "oid_mgt.h"
33
34
35
36
37void
38islpci_eth_cleanup_transmit(islpci_private *priv,
39 isl38xx_control_block *control_block)
40{
41 struct sk_buff *skb;
42 u32 index;
43
44
45 while (priv->free_data_tx !=
46 le32_to_cpu(control_block->
47 device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) {
48
49 index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
50
51
52
53 if (priv->pci_map_tx_address[index]) {
54
55
56 skb = priv->data_low_tx[index];
57
58#if VERBOSE > SHOW_ERROR_MESSAGES
59 DEBUG(SHOW_TRACING,
60 "cleanup skb %p skb->data %p skb->len %u truesize %u\n ",
61 skb, skb->data, skb->len, skb->truesize);
62#endif
63
64 pci_unmap_single(priv->pdev,
65 priv->pci_map_tx_address[index],
66 skb->len, PCI_DMA_TODEVICE);
67 dev_kfree_skb_irq(skb);
68 skb = NULL;
69 }
70
71 priv->free_data_tx++;
72 }
73}
74
/*
 * Hard-start-xmit handler for the prism54 ethernet interface.
 *
 * Aligns the frame to a 4-byte boundary (reallocating the skb when it is
 * cloned or lacks tailroom), maps it for DMA, places it in the low-priority
 * TX data queue of the shared control block, and triggers the device.
 * Runs under priv->slock with interrupts disabled.
 *
 * Returns NETDEV_TX_OK in all cases; on failure the frame is dropped and
 * counted in tx_dropped.
 */
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	/* NOTE(review): wds_mac is copied into the frame below but is never
	 * written anywhere in this function — presumably only reachable when
	 * init_wds is set; confirm where the WDS MAC is meant to come from. */
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
#endif

	/* serialize access to the TX queue and control block */
	spin_lock_irqsave(&priv->slock, flags);

	/* queue-full check: outstanding frames = driver ptr - reaped ptr */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* kick the device so it drains the queue and raises an
		 * update interrupt that will restart the netif queue */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}

	/* the device needs the payload 4-byte aligned; WDS mode additionally
	 * needs 6 bytes prepended for the extra MAC address */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* bytes needed: alignment padding plus 6 for the WDS MAC */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* shift in place when the skb is private and has room */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the head, then make room for the WDS MAC */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				/* memmove: source and destination overlap */
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
			      src, skb->len);
#endif
		} else {
			/* no room or shared data: copy into a fresh skb */
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* align the new buffer as well */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the frame for device (DMA) access */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	/* NOTE(review): comparing the dma_addr_t against 0 is not the
	 * documented way to detect a mapping failure — the DMA API provides
	 * pci_dma_mapping_error() for this; confirm and convert. */
	if (unlikely(pci_map_address == 0)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}

	/* fill the next free fragment descriptor in the ring */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	/* remember mapping and skb so cleanup_transmit can release them */
	priv->pci_map_tx_address[index] = pci_map_address;

	priv->data_low_tx[index] = skb;

	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* the descriptor must be fully written before the device can see
	 * the advanced driver pointer — hence the write barrier */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	/* stop the netif queue early when the ring is nearly full */
	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		netif_stop_queue(ndev);

		/* flag tells the interrupt handler to wake the queue */
		priv->data_low_tx_full = 1;
	}

	/* account the transmission */
	ndev->trans_start = jiffies;
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* tell the device new data is waiting */
	islpci_trigger(priv);

	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

      drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
245
/*
 * Convert a received monitor-mode frame for delivery to the stack.
 *
 * Strips the device's rfmon header and, when the interface is in
 * Prism/AVS capture mode (ARPHRD_IEEE80211_PRISM), prepends an AVS
 * capture header built from the rfmon fields.  May replace *skb with an
 * expanded copy if there is not enough headroom for the AVS header.
 *
 * Returns 0 on success, -1 when the frame must be discarded (device
 * flagged it bad, or the headroom expansion failed).
 */
static inline int
islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
{
	/* device-provided radio header sits at the front of the frame */
	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;

	/* bit 0 of flags marks a frame the device considers bad */
	if (hdr->flags & 0x01)
		/* This one is bad. Drop it ! */
		return -1;
	if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) {
		struct avs_80211_1_header *avs;

		/* capture the rfmon fields before the header is stripped */
		u32 clock = le32_to_cpu(hdr->clock);
		u8 rate = hdr->rate;
		u16 freq = le16_to_cpu(hdr->freq);
		u8 rssi = hdr->rssi;

		skb_pull(*skb, sizeof (struct rfmon_header));

		/* make room for the AVS header; copy-expand if needed */
		if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) {
			struct sk_buff *newskb = skb_copy_expand(*skb,
								 sizeof (struct
									 avs_80211_1_header),
								 0, GFP_ATOMIC);
			if (newskb) {
				dev_kfree_skb_irq(*skb);
				*skb = newskb;
			} else
				return -1;

		}

		/* build the AVS capture header (all fields big-endian) */
		avs =
		    (struct avs_80211_1_header *) skb_push(*skb,
							   sizeof (struct
								   avs_80211_1_header));

		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
		avs->mactime = cpu_to_be64(clock);
		avs->hosttime = cpu_to_be64(jiffies);
		avs->phytype = cpu_to_be32(6);	/* OFDM: 6 for (g), 8 for (a) */
		avs->channel = cpu_to_be32(channel_of_freq(freq));
		avs->datarate = cpu_to_be32(rate * 5);	/* device units of 500 kbps */
		avs->antenna = cpu_to_be32(0);	/* unknown */
		avs->priority = cpu_to_be32(0);	/* unknown */
		avs->ssi_type = cpu_to_be32(3);	/* rssi */
		avs->ssi_signal = cpu_to_be32(rssi & 0x7f);
		avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise);	/* rssi */
		avs->preamble = cpu_to_be32(0);	/* unknown */
		avs->encoding = cpu_to_be32(0);	/* unknown */
	} else
		/* plain monitor mode: just drop the rfmon header */
		skb_pull(*skb, sizeof (struct rfmon_header));

	(*skb)->protocol = htons(ETH_P_802_2);
	skb_reset_mac_header(*skb);
	(*skb)->pkt_type = PACKET_OTHERHOST;

	return 0;
}
309
/*
 * Process one received frame and refill the RX ring.
 *
 * Takes the next completed fragment from the low-priority RX data queue,
 * unmaps its DMA buffer, fixes up alignment/WDS offsets, dispatches the
 * skb to the stack (monitor conversion first when in monitor mode), then
 * tops the ring back up with freshly allocated, DMA-mapped skbs and
 * triggers the device.  Called from interrupt context.
 *
 * Always returns 0.
 */
int
islpci_eth_receive(islpci_private *priv)
{
	struct net_device *ndev = priv->ndev;
	isl38xx_control_block *control_block = priv->control_block;
	struct sk_buff *skb;
	u16 size;
	u32 index, offset;
	unsigned char *src;
	int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
#endif

	/* the next frame to reap sits at the driver's free pointer,
	 * addressed modulo the ring size */
	index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
	size = le16_to_cpu(control_block->rx_data_low[index].size);
	skb = priv->data_low_rx[index];
	/* sub-word offset the device applied to the buffer address */
	offset = ((unsigned long)
		  le32_to_cpu(control_block->rx_data_low[index].address) -
		  (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING,
	      "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
	      control_block->rx_data_low[priv->free_data_rx].address, skb->data,
	      skb->len, offset, skb->truesize);
#endif

	/* hand the buffer back to the CPU before touching its contents */
	pci_unmap_single(priv->pdev,
			 priv->pci_map_rx_address[index],
			 MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

	/* expose the received payload */
	skb_put(skb, size);
	if (offset) {
		/* NOTE(review): pull+put shifts the data pointer past the
		 * 2-byte device offset while keeping skb->len — presumably
		 * matches how the device wrote the frame; confirm. */
		skb_pull(skb, 2);
		skb_put(skb, 2);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* in WDS mode the extra 6-byte MAC is stripped from the front */
	if (init_wds) {
		/* memmove: source and destination overlap */
		src = skb->data + 6;
		memmove(skb->data, src, skb->len - 6);
		skb_trim(skb, skb->len - 6);
	}
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
	DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

	/* display the buffer contents for debugging */
	DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
		skb->dev = ndev;
		discard = islpci_monitor_rx(priv, &skb);
	} else {
		if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
			/* frame still carries an rfmon header: harvest the
			 * signal-quality fields for wireless spy stats, then
			 * strip the header */
			struct iw_quality wstats;
			struct rx_annex_header *annex =
			    (struct rx_annex_header *) skb->data;
			wstats.level = annex->rfmon.rssi;

			/* noise taken from the last locally stored stats */
			wstats.noise = priv->local_iwstatistics.qual.noise;
			wstats.qual = wstats.level - wstats.noise;
			wstats.updated = 0x07;

			wireless_spy_update(ndev, annex->addr2, &wstats);

			/* move the destination/source MACs up so they abut
			 * the payload once the rfmon header is pulled */
			skb_copy_from_linear_data(skb,
						  (skb->data +
						   sizeof(struct rfmon_header)),
						  2 * ETH_ALEN);
			skb_pull(skb, sizeof (struct rfmon_header));
		}
		skb->protocol = eth_type_trans(skb, ndev);
	}
	skb->ip_summed = CHECKSUM_NONE;
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += size;

	/* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
	printk
	    ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
	     skb->data[0], skb->data[1], skb->data[2], skb->data[3],
	     skb->data[4], skb->data[5]);
#endif
	if (unlikely(discard)) {
		dev_kfree_skb_irq(skb);
		skb = NULL;
	} else
		netif_rx(skb);

	/* the slot has been reaped; advance the driver's free pointer */
	priv->free_data_rx++;

	/* refill the ring until driver pointer is a full queue ahead of
	 * the reaped pointer, or allocation/mapping fails */
	while (index =
	       le32_to_cpu(control_block->
			   driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
	       index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
		/* allocate an sk_buff for received data frames storage
		 * include any required alignment operations */
		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
		if (unlikely(skb == NULL)) {
			/* error allocating an sk_buff, break out of loop */
			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
			break;
		}
		/* align the buffer start on a 4-byte boundary */
		skb_reserve(skb, (4 - (long) skb->data) & 0x03);

		/* store the new skb structure pointer */
		index = index % ISL38XX_CB_RX_QSIZE;
		priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING,
		      "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
		      skb, skb->data, skb->len, index, skb->truesize);
#endif

		/* map the buffer for device (DMA) access */
		priv->pci_map_rx_address[index] =
		    pci_map_single(priv->pdev, (void *) skb->data,
				   MAX_FRAGMENT_SIZE_RX + 2,
				   PCI_DMA_FROMDEVICE);
		if (unlikely(!priv->pci_map_rx_address[index])) {
			/* error mapping the buffer to device accessible
			 * memory address */
			DEBUG(SHOW_ERROR_MESSAGES,
			      "Error mapping DMA address\n");

			/* free the skb and break out of the refill loop */
			dev_kfree_skb_irq((struct sk_buff *) skb);
			skb = NULL;
			break;
		}

		/* publish the new buffer address to the device */
		control_block->rx_data_low[index].address =
		    cpu_to_le32((u32)priv->pci_map_rx_address[index]);
		wmb();

		/* the descriptor write above must land before the pointer
		 * advance below becomes visible to the device */
		le32_add_cpu(&control_block->
			     driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
	}

	/* tell the device the RX ring has been refilled */
	islpci_trigger(priv);

	return 0;
}
478
479void
480islpci_do_reset_and_wake(struct work_struct *work)
481{
482 islpci_private *priv = container_of(work, islpci_private, reset_task);
483
484 islpci_reset(priv, 1);
485 priv->reset_task_pending = 0;
486 smp_wmb();
487 netif_wake_queue(priv->ndev);
488}
489
490void
491islpci_eth_tx_timeout(struct net_device *ndev)
492{
493 islpci_private *priv = netdev_priv(ndev);
494
495
496 ndev->stats.tx_errors++;
497
498 if (!priv->reset_task_pending) {
499 printk(KERN_WARNING
500 "%s: tx_timeout, scheduling reset", ndev->name);
501 netif_stop_queue(ndev);
502 priv->reset_task_pending = 1;
503 schedule_work(&priv->reset_task);
504 } else {
505 printk(KERN_WARNING
506 "%s: tx_timeout, waiting for reset", ndev->name);
507 }
508}
509