/*
 * FORE Systems 200E-series ATM driver, version 0.3e.
 *
 * Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen.
 */
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/init.h>
29#include <linux/capability.h>
30#include <linux/interrupt.h>
31#include <linux/bitops.h>
32#include <linux/pci.h>
33#include <linux/module.h>
34#include <linux/atmdev.h>
35#include <linux/sonet.h>
36#include <linux/atm_suni.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/firmware.h>
40#include <asm/io.h>
41#include <asm/string.h>
42#include <asm/page.h>
43#include <asm/irq.h>
44#include <asm/dma.h>
45#include <asm/byteorder.h>
46#include <asm/uaccess.h>
47#include <linux/atomic.h>
48
49#ifdef CONFIG_SBUS
50#include <linux/of.h>
51#include <linux/of_device.h>
52#include <asm/idprom.h>
53#include <asm/openprom.h>
54#include <asm/oplib.h>
55#include <asm/pgtable.h>
56#endif
57
58#if defined(CONFIG_ATM_FORE200E_USE_TASKLET)
59#define FORE200E_USE_TASKLET
60#endif
61
62#if 0
63#define FORE200E_BSQ_DEBUG
64#endif
65
66#if 1
67#define FORE200E_52BYTE_AAL0_SDU
68#endif
69
70#include "fore200e.h"
71#include "suni.h"
72
73#define FORE200E_VERSION "0.3e"
74
75#define FORE200E "fore200e: "
76
77#if 0
78#define CONFIG_ATM_FORE200E_DEBUG 1
79#endif
80#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82 printk(FORE200E format, ##args); } while (0)
83#else
84#define DPRINTK(level, format, args...) do {} while (0)
85#endif
86
87
88#define FORE200E_ALIGN(addr, alignment) \
89 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90
91#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
92
93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
94
95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
96
#if 1
#define ASSERT(expr) do { \
	if (!(expr)) { \
		printk(FORE200E "assertion failed! %s[%d]: %s\n", \
		       __func__, __LINE__, #expr); \
		panic(FORE200E "%s", __func__); \
	} \
} while (0)
#else
#define ASSERT(expr) do {} while (0)
#endif
106
107
108static const struct atmdev_ops fore200e_ops;
109static const struct fore200e_bus fore200e_bus[];
110
111static LIST_HEAD(fore200e_boards);
112
113
114MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
115MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
116MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
117
118
119static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
120 { BUFFER_S1_NBR, BUFFER_L1_NBR },
121 { BUFFER_S2_NBR, BUFFER_L2_NBR }
122};
123
124static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
125 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
126 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
127};
128
129
130#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
131static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
132#endif
133
134
135#if 0
136static int
137fore200e_fore2atm_aal(enum fore200e_aal aal)
138{
139 switch(aal) {
140 case FORE200E_AAL0: return ATM_AAL0;
141 case FORE200E_AAL34: return ATM_AAL34;
142 case FORE200E_AAL5: return ATM_AAL5;
143 }
144
145 return -EINVAL;
146}
147#endif
148
149
150static enum fore200e_aal
151fore200e_atm2fore_aal(int aal)
152{
153 switch(aal) {
154 case ATM_AAL0: return FORE200E_AAL0;
155 case ATM_AAL34: return FORE200E_AAL34;
156 case ATM_AAL1:
157 case ATM_AAL2:
158 case ATM_AAL5: return FORE200E_AAL5;
159 }
160
161 return -EINVAL;
162}
163
164
165static char*
166fore200e_irq_itoa(int irq)
167{
168 static char str[8];
169 sprintf(str, "%d", irq);
170 return str;
171}
172
173
174
175
176
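/* allocate a host buffer of 'size' bytes plus room for alignment padding,
   then DMA-map the aligned region; e.g. with alignment = 32, 32 extra bytes
   are allocated and 'align_addr' is advanced by up to 31 bytes to the next
   32-byte boundary */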
177static int
178fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
179{
180 unsigned long offset = 0;
181
182 if (alignment <= sizeof(int))
183 alignment = 0;
184
185 chunk->alloc_size = size + alignment;
186 chunk->align_size = size;
187 chunk->direction = direction;
188
189 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
190 if (chunk->alloc_addr == NULL)
191 return -ENOMEM;
192
193 if (alignment > 0)
194 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
195
196 chunk->align_addr = chunk->alloc_addr + offset;
197
198 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
199
200 return 0;
201}
202
203
204
205
206static void
207fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
208{
209 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
210
211 kfree(chunk->alloc_addr);
212}
213
214
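/* busy-wait for roughly 'msecs' milliseconds by polling jiffies; only used
   for the short delays needed around board resets */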
215static void
216fore200e_spin(int msecs)
217{
218 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
219 while (time_before(jiffies, timeout));
220}
221
222
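/* poll a host-resident status word until it equals 'val', the board flags
   STATUS_ERROR, or 'msecs' milliseconds elapse; returns non-zero on success */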
223static int
224fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
225{
226 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
227 int ok;
228
229 mb();
230 do {
231 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
232 break;
233
234 } while (time_before(jiffies, timeout));
235
236#if 1
237 if (!ok) {
238 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
239 *addr, val);
240 }
241#endif
242
243 return ok;
244}
245
246
247static int
248fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
249{
250 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251 int ok;
252
253 do {
254 if ((ok = (fore200e->bus->read(addr) == val)))
255 break;
256
257 } while (time_before(jiffies, timeout));
258
259#if 1
260 if (!ok) {
261 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
262 fore200e->bus->read(addr), val);
263 }
264#endif
265
266 return ok;
267}
268
269
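/* free the data chunk of every receive buffer, over all buffer schemes and
   magnitudes */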
270static void
271fore200e_free_rx_buf(struct fore200e* fore200e)
272{
273 int scheme, magn, nbr;
274 struct buffer* buffer;
275
276 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
277 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
278
279 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
280
281 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
282
283 struct chunk* data = &buffer[ nbr ].data;
284
285 if (data->alloc_addr != NULL)
286 fore200e_chunk_free(fore200e, data);
287 }
288 }
289 }
290 }
291}
292
293
294static void
295fore200e_uninit_bs_queue(struct fore200e* fore200e)
296{
297 int scheme, magn;
298
299 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
300 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
301
302 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
303 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
304
305 if (status->alloc_addr)
306 fore200e->bus->dma_chunk_free(fore200e, status);
307
308 if (rbd_block->alloc_addr)
309 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
310 }
311 }
312}
313
314
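/* cold-start the board; when 'diag' is set, wait up to one second for the
   on-board self-test to report success */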
315static int
316fore200e_reset(struct fore200e* fore200e, int diag)
317{
318 int ok;
319
320 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
321
322 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
323
324 fore200e->bus->reset(fore200e);
325
326 if (diag) {
327 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
328 if (ok == 0) {
329
330 printk(FORE200E "device %s self-test failed\n", fore200e->name);
331 return -ENODEV;
332 }
333
334 printk(FORE200E "device %s self-test passed\n", fore200e->name);
335
336 fore200e->state = FORE200E_STATE_RESET;
337 }
338
339 return 0;
340}
341
342
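/* tear a device down in the reverse order of its initialization; the switch
   below relies on intentional fall-through so that each state also undoes
   every earlier one */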
343static void
344fore200e_shutdown(struct fore200e* fore200e)
345{
346 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
347 fore200e->name, fore200e->phys_base,
348 fore200e_irq_itoa(fore200e->irq));
349
350 if (fore200e->state > FORE200E_STATE_RESET) {
351
352 fore200e_reset(fore200e, 0);
353 }
354
355
356 switch(fore200e->state) {
357
358 case FORE200E_STATE_COMPLETE:
359 kfree(fore200e->stats);
360
361 case FORE200E_STATE_IRQ:
362 free_irq(fore200e->irq, fore200e->atm_dev);
363
364 case FORE200E_STATE_ALLOC_BUF:
365 fore200e_free_rx_buf(fore200e);
366
367 case FORE200E_STATE_INIT_BSQ:
368 fore200e_uninit_bs_queue(fore200e);
369
370 case FORE200E_STATE_INIT_RXQ:
371 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
372 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
373
374 case FORE200E_STATE_INIT_TXQ:
375 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
376 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
377
378 case FORE200E_STATE_INIT_CMDQ:
379 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
380
381 case FORE200E_STATE_INITIALIZE:
382
383
384 case FORE200E_STATE_START_FW:
385
386
387 case FORE200E_STATE_RESET:
388
389
390 case FORE200E_STATE_MAP:
391 fore200e->bus->unmap(fore200e);
392
393 case FORE200E_STATE_CONFIGURE:
394
395
396 case FORE200E_STATE_REGISTER:
397
398 atm_dev_deregister(fore200e->atm_dev);
399
400 case FORE200E_STATE_BLANK:
401
402 break;
403 }
404}
405
406
407#ifdef CONFIG_PCI
408
409static u32 fore200e_pca_read(volatile u32 __iomem *addr)
410{
411
412
413 return le32_to_cpu(readl(addr));
414}
415
416
417static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
418{
419
420
421 writel(cpu_to_le32(val), addr);
422}
423
424
425static u32
426fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
427{
428 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
429
430 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
431 virt_addr, size, direction, dma_addr);
432
433 return dma_addr;
434}
435
436
437static void
438fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
439{
440 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
441 dma_addr, size, direction);
442
443 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
444}
445
446
447static void
448fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
449{
450 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
451
452 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
453}
454
455static void
456fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
457{
458 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
459
460 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
461}
462
463
464
465
466
467static int
468fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
469 int size, int nbr, int alignment)
470{
471
472 chunk->alloc_size = size * nbr;
473 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
474 chunk->alloc_size,
475 &chunk->dma_addr);
476
477 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
478 return -ENOMEM;
479
480 chunk->align_addr = chunk->alloc_addr;
481
482 return 0;
483}
484
485
486
487
488static void
489fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
490{
491 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
492 chunk->alloc_size,
493 chunk->alloc_addr,
494 chunk->dma_addr);
495}
496
497
498static int
499fore200e_pca_irq_check(struct fore200e* fore200e)
500{
501
502 int irq_posted = readl(fore200e->regs.pca.psr);
503
504#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
505 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
506 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
507 }
508#endif
509
510 return irq_posted;
511}
512
513
514static void
515fore200e_pca_irq_ack(struct fore200e* fore200e)
516{
517 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
518}
519
520
521static void
522fore200e_pca_reset(struct fore200e* fore200e)
523{
524 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
525 fore200e_spin(10);
526 writel(0, fore200e->regs.pca.hcr);
527}
528
529
530static int fore200e_pca_map(struct fore200e* fore200e)
531{
532 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
533
534 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
535
536 if (fore200e->virt_base == NULL) {
537 printk(FORE200E "can't map device %s\n", fore200e->name);
538 return -EFAULT;
539 }
540
541 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
542
543
544 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
545 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
546 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
547
548 fore200e->state = FORE200E_STATE_MAP;
549 return 0;
550}
551
552
553static void
554fore200e_pca_unmap(struct fore200e* fore200e)
555{
556 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
557
558 if (fore200e->virt_base != NULL)
559 iounmap(fore200e->virt_base);
560}
561
562
563static int fore200e_pca_configure(struct fore200e *fore200e)
564{
565 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
566 u8 master_ctrl, latency;
567
568 DPRINTK(2, "device %s being configured\n", fore200e->name);
569
570 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
571 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
572 return -EIO;
573 }
574
575 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
576
577 master_ctrl = master_ctrl
578#if defined(__BIG_ENDIAN)
579
580 | PCA200E_CTRL_CONVERT_ENDIAN
581#endif
582#if 0
583 | PCA200E_CTRL_DIS_CACHE_RD
584 | PCA200E_CTRL_DIS_WRT_INVAL
585 | PCA200E_CTRL_ENA_CONT_REQ_MODE
586 | PCA200E_CTRL_2_CACHE_WRT_INVAL
587#endif
588 | PCA200E_CTRL_LARGE_PCI_BURSTS;
589
590 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
591
592
593
594
595 latency = 192;
596 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
597
598 fore200e->state = FORE200E_STATE_CONFIGURE;
599 return 0;
600}
601
602
603static int __init
604fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
605{
606 struct host_cmdq* cmdq = &fore200e->host_cmdq;
607 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
608 struct prom_opcode opcode;
609 int ok;
610 u32 prom_dma;
611
612 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
613
614 opcode.opcode = OPCODE_GET_PROM;
615 opcode.pad = 0;
616
617 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
618
619 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
620
621 *entry->status = STATUS_PENDING;
622
623 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
624
625 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
626
627 *entry->status = STATUS_FREE;
628
629 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
630
631 if (ok == 0) {
632 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
633 return -EIO;
634 }
635
636#if defined(__BIG_ENDIAN)
637
638#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
639
640
641 swap_here(&prom->mac_addr[0]);
642 swap_here(&prom->mac_addr[4]);
643#endif
644
645 return 0;
646}
647
648
649static int
650fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
651{
652 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
653
654 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
655 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
656}
657
658#endif
659
660
661#ifdef CONFIG_SBUS
662
663static u32 fore200e_sba_read(volatile u32 __iomem *addr)
664{
665 return sbus_readl(addr);
666}
667
668static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
669{
670 sbus_writel(val, addr);
671}
672
673static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
674{
675 struct platform_device *op = fore200e->bus_dev;
676 u32 dma_addr;
677
678 dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
679
680 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
681 virt_addr, size, direction, dma_addr);
682
683 return dma_addr;
684}
685
686static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
687{
688 struct platform_device *op = fore200e->bus_dev;
689
	DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
		dma_addr, size, direction);
692
693 dma_unmap_single(&op->dev, dma_addr, size, direction);
694}
695
696static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
697{
698 struct platform_device *op = fore200e->bus_dev;
699
700 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
701
702 dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
703}
704
705static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
706{
707 struct platform_device *op = fore200e->bus_dev;
708
709 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
710
711 dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
712}
713
714
715
716
717static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
718 int size, int nbr, int alignment)
719{
720 struct platform_device *op = fore200e->bus_dev;
721
722 chunk->alloc_size = chunk->align_size = size * nbr;
723
724
725 chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
726 &chunk->dma_addr, GFP_ATOMIC);
727
728 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
729 return -ENOMEM;
730
731 chunk->align_addr = chunk->alloc_addr;
732
733 return 0;
734}
735
736
737static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
738{
739 struct platform_device *op = fore200e->bus_dev;
740
741 dma_free_coherent(&op->dev, chunk->alloc_size,
742 chunk->alloc_addr, chunk->dma_addr);
743}
744
745static void fore200e_sba_irq_enable(struct fore200e *fore200e)
746{
747 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
748 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
749}
750
751static int fore200e_sba_irq_check(struct fore200e *fore200e)
752{
753 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
754}
755
756static void fore200e_sba_irq_ack(struct fore200e *fore200e)
757{
758 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
759 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
760}
761
762static void fore200e_sba_reset(struct fore200e *fore200e)
763{
764 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
765 fore200e_spin(10);
766 fore200e->bus->write(0, fore200e->regs.sba.hcr);
767}
768
769static int __init fore200e_sba_map(struct fore200e *fore200e)
770{
771 struct platform_device *op = fore200e->bus_dev;
772 unsigned int bursts;
773
774
775 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
776 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
777 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
778 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
779
780 if (!fore200e->virt_base) {
781 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
782 return -EFAULT;
783 }
784
785 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
786
787 fore200e->bus->write(0x02, fore200e->regs.sba.isr);
788
789
790 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
791
792 if (sbus_can_dma_64bit())
793 sbus_set_sbus64(&op->dev, bursts);
794
795 fore200e->state = FORE200E_STATE_MAP;
796 return 0;
797}
798
799static void fore200e_sba_unmap(struct fore200e *fore200e)
800{
801 struct platform_device *op = fore200e->bus_dev;
802
803 of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
804 of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
805 of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
806 of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH);
807}
808
809static int __init fore200e_sba_configure(struct fore200e *fore200e)
810{
811 fore200e->state = FORE200E_STATE_CONFIGURE;
812 return 0;
813}
814
815static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
816{
817 struct platform_device *op = fore200e->bus_dev;
818 const u8 *prop;
819 int len;
820
821 prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
822 if (!prop)
823 return -ENODEV;
824 memcpy(&prom->mac_addr[4], prop, 4);
825
826 prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
827 if (!prop)
828 return -ENODEV;
829 memcpy(&prom->mac_addr[2], prop, 4);
830
831 prom->serial_number = of_getintprop_default(op->dev.of_node,
832 "serialnumber", 0);
833 prom->hw_revision = of_getintprop_default(op->dev.of_node,
834 "promversion", 0);
835
836 return 0;
837}
838
839static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
840{
841 struct platform_device *op = fore200e->bus_dev;
842 const struct linux_prom_registers *regs;
843
844 regs = of_get_property(op->dev.of_node, "reg", NULL);
845
846 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
847 (regs ? regs->which_io : 0), op->dev.of_node->name);
848}
849#endif
850
851
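/* reap completed tx entries: unmap the PDU, free any bounce buffer, return
   the skb to its vcc (or drop it if the vcc was closed or re-opened since
   the PDU was queued) and update the tx statistics */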
852static void
853fore200e_tx_irq(struct fore200e* fore200e)
854{
855 struct host_txq* txq = &fore200e->host_txq;
856 struct host_txq_entry* entry;
857 struct atm_vcc* vcc;
858 struct fore200e_vc_map* vc_map;
859
860 if (fore200e->host_txq.txing == 0)
861 return;
862
863 for (;;) {
864
865 entry = &txq->host_entry[ txq->tail ];
866
867 if ((*entry->status & STATUS_COMPLETE) == 0) {
868 break;
869 }
870
871 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
872 entry, txq->tail, entry->vc_map, entry->skb);
873
874
875 kfree(entry->data);
876
877
878 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
879 DMA_TO_DEVICE);
880
881 vc_map = entry->vc_map;
882
883
884 if ((vc_map->vcc == NULL) ||
885 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
886
887 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
888 fore200e->atm_dev->number);
889
890 dev_kfree_skb_any(entry->skb);
891 }
892 else {
893 ASSERT(vc_map->vcc);
894
895
896 if (vc_map->incarn != entry->incarn) {
897
898
899
900
901
902
903
904
905
906
907
908
909
910 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
911 fore200e->atm_dev->number);
912
913 dev_kfree_skb_any(entry->skb);
914 }
915 else {
916 vcc = vc_map->vcc;
917 ASSERT(vcc);
918
919
920 if (vcc->pop) {
921 vcc->pop(vcc, entry->skb);
922 }
923 else {
924 dev_kfree_skb_any(entry->skb);
925 }
926#if 1
927
928 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
929 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
930 }
931#endif
932
933 if (*entry->status & STATUS_ERROR)
934 atomic_inc(&vcc->stats->tx_err);
935 else
936 atomic_inc(&vcc->stats->tx);
937 }
938 }
939
940 *entry->status = STATUS_FREE;
941
942 fore200e->host_txq.txing--;
943
944 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
945 }
946}
947
948
949#ifdef FORE200E_BSQ_DEBUG
950int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
951{
952 struct buffer* buffer;
953 int count = 0;
954
955 buffer = bsq->freebuf;
956 while (buffer) {
957
958 if (buffer->supplied) {
959 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
960 where, scheme, magn, buffer->index);
961 }
962
963 if (buffer->magn != magn) {
964 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
965 where, scheme, magn, buffer->index, buffer->magn);
966 }
967
968 if (buffer->scheme != scheme) {
969 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
970 where, scheme, magn, buffer->index, buffer->scheme);
971 }
972
973 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
974 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
975 where, scheme, magn, buffer->index);
976 }
977
978 count++;
979 buffer = buffer->next;
980 }
981
982 if (count != bsq->freebuf_count) {
983 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
984 where, scheme, magn, count, bsq->freebuf_count);
985 }
986 return 0;
987}
988#endif
989
990
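/* replenish the buffer supply queues: whenever at least RBD_BLK_SIZE free
   buffers are available, pack their DMA addresses into an RBD block and
   hand the block over to the firmware */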
991static void
992fore200e_supply(struct fore200e* fore200e)
993{
994 int scheme, magn, i;
995
996 struct host_bsq* bsq;
997 struct host_bsq_entry* entry;
998 struct buffer* buffer;
999
1000 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1001 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1002
1003 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1004
1005#ifdef FORE200E_BSQ_DEBUG
1006 bsq_audit(1, bsq, scheme, magn);
1007#endif
1008 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1009
1010 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1011 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1012
1013 entry = &bsq->host_entry[ bsq->head ];
1014
1015 for (i = 0; i < RBD_BLK_SIZE; i++) {
1016
1017
1018 buffer = bsq->freebuf;
1019 if (!buffer) {
1020 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1021 scheme, magn, bsq->freebuf_count);
1022 return;
1023 }
1024 bsq->freebuf = buffer->next;
1025
1026#ifdef FORE200E_BSQ_DEBUG
1027 if (buffer->supplied)
1028 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1029 scheme, magn, buffer->index);
1030 buffer->supplied = 1;
1031#endif
1032 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1033 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1034 }
1035
1036 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1037
1038
1039 bsq->freebuf_count -= RBD_BLK_SIZE;
1040
1041 *entry->status = STATUS_PENDING;
1042 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1043 }
1044 }
1045 }
1046}
1047
1048
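/* build an skb from the segments of a received PDU, copy the data out of
   the DMA buffers and push it up the ATM stack; with FORE200E_52BYTE_AAL0_SDU,
   a reconstructed 4-byte cell header is prepended for raw AAL0 connections */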
1049static int
1050fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1051{
1052 struct sk_buff* skb;
1053 struct buffer* buffer;
1054 struct fore200e_vcc* fore200e_vcc;
1055 int i, pdu_len = 0;
1056#ifdef FORE200E_52BYTE_AAL0_SDU
1057 u32 cell_header = 0;
1058#endif
1059
1060 ASSERT(vcc);
1061
1062 fore200e_vcc = FORE200E_VCC(vcc);
1063 ASSERT(fore200e_vcc);
1064
1065#ifdef FORE200E_52BYTE_AAL0_SDU
1066 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1067
1068 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1069 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1070 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1071 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1072 rpd->atm_header.clp;
1073 pdu_len = 4;
1074 }
1075#endif
1076
1077
1078 for (i = 0; i < rpd->nseg; i++)
1079 pdu_len += rpd->rsd[ i ].length;
1080
1081 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1082 if (skb == NULL) {
1083 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1084
1085 atomic_inc(&vcc->stats->rx_drop);
1086 return -ENOMEM;
1087 }
1088
1089 __net_timestamp(skb);
1090
1091#ifdef FORE200E_52BYTE_AAL0_SDU
1092 if (cell_header) {
1093 *((u32*)skb_put(skb, 4)) = cell_header;
1094 }
1095#endif
1096
1097
1098 for (i = 0; i < rpd->nseg; i++) {
1099
1100
1101 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1102
1103
1104 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1105
1106 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1107
1108
1109 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1110 }
1111
1112 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1113
1114 if (pdu_len < fore200e_vcc->rx_min_pdu)
1115 fore200e_vcc->rx_min_pdu = pdu_len;
1116 if (pdu_len > fore200e_vcc->rx_max_pdu)
1117 fore200e_vcc->rx_max_pdu = pdu_len;
1118 fore200e_vcc->rx_pdu++;
1119
1120
1121 if (atm_charge(vcc, skb->truesize) == 0) {
1122
1123 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1124 vcc->itf, vcc->vpi, vcc->vci);
1125
1126 dev_kfree_skb_any(skb);
1127
1128 atomic_inc(&vcc->stats->rx_drop);
1129 return -ENOMEM;
1130 }
1131
1132 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1133
1134 vcc->push(vcc, skb);
1135 atomic_inc(&vcc->stats->rx);
1136
1137 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1138
1139 return 0;
1140}
1141
1142
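/* give the receive buffers referenced by a PDU descriptor back to the free
   list of their buffer supply queue */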
1143static void
1144fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1145{
1146 struct host_bsq* bsq;
1147 struct buffer* buffer;
1148 int i;
1149
1150 for (i = 0; i < rpd->nseg; i++) {
1151
1152
1153 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1154
1155 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1156
1157#ifdef FORE200E_BSQ_DEBUG
1158 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1159
1160 if (buffer->supplied == 0)
1161 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1162 buffer->scheme, buffer->magn, buffer->index);
1163 buffer->supplied = 0;
1164#endif
1165
1166
1167 buffer->next = bsq->freebuf;
1168 bsq->freebuf = buffer;
1169
1170
1171 bsq->freebuf_count++;
1172 }
1173}
1174
1175
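/* drain the receive queue: hand each completed PDU to its vcc (or count an
   error), recycle its buffers, return the rpd to the firmware and refill
   the buffer supply queues */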
1176static void
1177fore200e_rx_irq(struct fore200e* fore200e)
1178{
1179 struct host_rxq* rxq = &fore200e->host_rxq;
1180 struct host_rxq_entry* entry;
1181 struct atm_vcc* vcc;
1182 struct fore200e_vc_map* vc_map;
1183
1184 for (;;) {
1185
1186 entry = &rxq->host_entry[ rxq->head ];
1187
1188
1189 if ((*entry->status & STATUS_COMPLETE) == 0)
1190 break;
1191
1192 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1193
1194 if ((vc_map->vcc == NULL) ||
1195 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1196
1197 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1198 fore200e->atm_dev->number,
1199 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1200 }
1201 else {
1202 vcc = vc_map->vcc;
1203 ASSERT(vcc);
1204
1205 if ((*entry->status & STATUS_ERROR) == 0) {
1206
1207 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1208 }
1209 else {
1210 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1211 fore200e->atm_dev->number,
1212 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1213 atomic_inc(&vcc->stats->rx_err);
1214 }
1215 }
1216
1217 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1218
1219 fore200e_collect_rpd(fore200e, entry->rpd);
1220
1221
1222 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1223 *entry->status = STATUS_FREE;
1224
1225 fore200e_supply(fore200e);
1226 }
1227}
1228
1229
1230#ifndef FORE200E_USE_TASKLET
1231static void
1232fore200e_irq(struct fore200e* fore200e)
1233{
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(&fore200e->q_lock, flags);
1237 fore200e_rx_irq(fore200e);
1238 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1239
1240 spin_lock_irqsave(&fore200e->q_lock, flags);
1241 fore200e_tx_irq(fore200e);
1242 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1243}
1244#endif
1245
1246
1247static irqreturn_t
1248fore200e_interrupt(int irq, void* dev)
1249{
1250 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1251
1252 if (fore200e->bus->irq_check(fore200e) == 0) {
1253
1254 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1255 return IRQ_NONE;
1256 }
1257 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1258
1259#ifdef FORE200E_USE_TASKLET
1260 tasklet_schedule(&fore200e->tx_tasklet);
1261 tasklet_schedule(&fore200e->rx_tasklet);
1262#else
1263 fore200e_irq(fore200e);
1264#endif
1265
1266 fore200e->bus->irq_ack(fore200e);
1267 return IRQ_HANDLED;
1268}
1269
1270
1271#ifdef FORE200E_USE_TASKLET
1272static void
1273fore200e_tx_tasklet(unsigned long data)
1274{
1275 struct fore200e* fore200e = (struct fore200e*) data;
1276 unsigned long flags;
1277
1278 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1279
1280 spin_lock_irqsave(&fore200e->q_lock, flags);
1281 fore200e_tx_irq(fore200e);
1282 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1283}
1284
1285
1286static void
1287fore200e_rx_tasklet(unsigned long data)
1288{
1289 struct fore200e* fore200e = (struct fore200e*) data;
1290 unsigned long flags;
1291
1292 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1293
1294 spin_lock_irqsave(&fore200e->q_lock, flags);
1295 fore200e_rx_irq((struct fore200e*) data);
1296 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1297}
1298#endif
1299
1300
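/* spread VCs over the two (identical) buffer schemes, based on the parity
   of the VCI */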
1301static int
1302fore200e_select_scheme(struct atm_vcc* vcc)
1303{
1304
1305 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1306
1307 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1308 vcc->itf, vcc->vpi, vcc->vci, scheme);
1309
1310 return scheme;
1311}
1312
1313
1314static int
1315fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1316{
1317 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1318 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1319 struct activate_opcode activ_opcode;
1320 struct deactivate_opcode deactiv_opcode;
1321 struct vpvc vpvc;
1322 int ok;
1323 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1324
1325 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1326
1327 if (activate) {
1328 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1329
1330 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1331 activ_opcode.aal = aal;
1332 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1333 activ_opcode.pad = 0;
1334 }
1335 else {
1336 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1337 deactiv_opcode.pad = 0;
1338 }
1339
1340 vpvc.vci = vcc->vci;
1341 vpvc.vpi = vcc->vpi;
1342
1343 *entry->status = STATUS_PENDING;
1344
1345 if (activate) {
1346
1347#ifdef FORE200E_52BYTE_AAL0_SDU
1348 mtu = 48;
1349#endif
1350
1351 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1352 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1353 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1354 }
1355 else {
1356 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1357 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1358 }
1359
1360 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1361
1362 *entry->status = STATUS_FREE;
1363
1364 if (ok == 0) {
1365 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1366 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1367 return -EIO;
1368 }
1369
1370 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1371 activate ? "open" : "clos");
1372
1373 return 0;
1374}
1375
1376
1377#define FORE200E_MAX_BACK2BACK_CELLS 255
1378
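/* turn a CBR peak cell rate into the firmware rate-control pair: out of each
   window of FORE200E_MAX_BACK2BACK_CELLS cell slots, 'data_cells' may carry
   data and the rest are forced idle; e.g. a max_pcr of half the OC-3 line
   rate gives roughly 127 data cells and 128 idle cells per window */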
1379static void
1380fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1381{
1382 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1383
1384
1385 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1386 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1387 }
1388 else {
1389
1390 rate->data_cells = rate->idle_cells = 0;
1391 }
1392}
1393
1394
1395static int
1396fore200e_open(struct atm_vcc *vcc)
1397{
1398 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1399 struct fore200e_vcc* fore200e_vcc;
1400 struct fore200e_vc_map* vc_map;
1401 unsigned long flags;
1402 int vci = vcc->vci;
1403 short vpi = vcc->vpi;
1404
1405 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1406 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1407
1408 spin_lock_irqsave(&fore200e->q_lock, flags);
1409
1410 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1411 if (vc_map->vcc) {
1412
1413 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1414
1415 printk(FORE200E "VC %d.%d.%d already in use\n",
1416 fore200e->atm_dev->number, vpi, vci);
1417
1418 return -EINVAL;
1419 }
1420
1421 vc_map->vcc = vcc;
1422
1423 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1424
1425 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1426 if (fore200e_vcc == NULL) {
1427 vc_map->vcc = NULL;
1428 return -ENOMEM;
1429 }
1430
1431 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1432 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1433 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1434 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1435 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1436 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1437 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1438
1439
1440 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1441
1442 mutex_lock(&fore200e->rate_mtx);
1443 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1444 mutex_unlock(&fore200e->rate_mtx);
1445
1446 kfree(fore200e_vcc);
1447 vc_map->vcc = NULL;
1448 return -EAGAIN;
1449 }
1450
1451
1452 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1453 mutex_unlock(&fore200e->rate_mtx);
1454 }
1455
1456 vcc->itf = vcc->dev->number;
1457
1458 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1459 set_bit(ATM_VF_ADDR, &vcc->flags);
1460
1461 vcc->dev_data = fore200e_vcc;
1462
1463 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1464
1465 vc_map->vcc = NULL;
1466
1467 clear_bit(ATM_VF_ADDR, &vcc->flags);
1468 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1469
1470 vcc->dev_data = NULL;
1471
1472 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1473
1474 kfree(fore200e_vcc);
1475 return -EINVAL;
1476 }
1477
1478
1479 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1480
1481 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1482 set_bit(ATM_VF_HASQOS, &vcc->flags);
1483
1484 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1485 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1486 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1487 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1488 }
1489
1490 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1491 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1492 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1493
1494
1495 vc_map->incarn = ++fore200e->incarn_count;
1496
1497
1498 set_bit(ATM_VF_READY, &vcc->flags);
1499
1500 return 0;
1501}
1502
1503
1504static void
1505fore200e_close(struct atm_vcc* vcc)
1506{
1507 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1508 struct fore200e_vcc* fore200e_vcc;
1509 struct fore200e_vc_map* vc_map;
1510 unsigned long flags;
1511
1512 ASSERT(vcc);
1513 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1514 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1515
1516 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1517
1518 clear_bit(ATM_VF_READY, &vcc->flags);
1519
1520 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1521
1522 spin_lock_irqsave(&fore200e->q_lock, flags);
1523
1524 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1525
1526
1527 vc_map->vcc = NULL;
1528
1529 vcc->itf = vcc->vci = vcc->vpi = 0;
1530
1531 fore200e_vcc = FORE200E_VCC(vcc);
1532 vcc->dev_data = NULL;
1533
1534 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1535
1536
1537 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1538
1539 mutex_lock(&fore200e->rate_mtx);
1540 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1541 mutex_unlock(&fore200e->rate_mtx);
1542
1543 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1544 }
1545
1546 clear_bit(ATM_VF_ADDR, &vcc->flags);
1547 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1548
1549 ASSERT(fore200e_vcc);
1550 kfree(fore200e_vcc);
1551}
1552
1553
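/* transmit entry point: misaligned PDUs and AAL0 PDUs needing cell padding
   are first bounced into a DMA-able buffer, then a tpd is filled in and
   queued; a saturated tx queue is retried a bounded number of times before
   the PDU is dropped */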
1554static int
1555fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1556{
1557 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1558 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1559 struct fore200e_vc_map* vc_map;
1560 struct host_txq* txq = &fore200e->host_txq;
1561 struct host_txq_entry* entry;
1562 struct tpd* tpd;
1563 struct tpd_haddr tpd_haddr;
1564 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1565 int tx_copy = 0;
1566 int tx_len = skb->len;
1567 u32* cell_header = NULL;
1568 unsigned char* skb_data;
1569 int skb_len;
1570 unsigned char* data;
1571 unsigned long flags;
1572
1573 ASSERT(vcc);
1574 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1575 ASSERT(fore200e);
1576 ASSERT(fore200e_vcc);
1577
1578 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1580 dev_kfree_skb_any(skb);
1581 return -EINVAL;
1582 }
1583
1584#ifdef FORE200E_52BYTE_AAL0_SDU
1585 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1586 cell_header = (u32*) skb->data;
1587 skb_data = skb->data + 4;
1588 skb_len = tx_len = skb->len - 4;
1589
1590 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1591 }
1592 else
1593#endif
1594 {
1595 skb_data = skb->data;
1596 skb_len = skb->len;
1597 }
1598
1599 if (((unsigned long)skb_data) & 0x3) {
1600
1601 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1602 tx_copy = 1;
1603 tx_len = skb_len;
1604 }
1605
1606 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1607
1608
1609 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1610 tx_copy = 1;
1611 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1612 }
1613
1614 if (tx_copy) {
1615 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1616 if (data == NULL) {
1617 if (vcc->pop) {
1618 vcc->pop(vcc, skb);
1619 }
1620 else {
1621 dev_kfree_skb_any(skb);
1622 }
1623 return -ENOMEM;
1624 }
1625
1626 memcpy(data, skb_data, skb_len);
1627 if (skb_len < tx_len)
1628 memset(data + skb_len, 0x00, tx_len - skb_len);
1629 }
1630 else {
1631 data = skb_data;
1632 }
1633
1634 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1635 ASSERT(vc_map->vcc == vcc);
1636
1637 retry_here:
1638
1639 spin_lock_irqsave(&fore200e->q_lock, flags);
1640
1641 entry = &txq->host_entry[ txq->head ];
1642
1643 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1644
1645
1646 fore200e_tx_irq(fore200e);
1647
1648 if (*entry->status != STATUS_FREE) {
1649
1650 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1651
1652
1653 if (--retry > 0) {
1654 udelay(50);
1655 goto retry_here;
1656 }
1657
1658 atomic_inc(&vcc->stats->tx_err);
1659
1660 fore200e->tx_sat++;
1661 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1662 fore200e->name, fore200e->cp_queues->heartbeat);
1663 if (vcc->pop) {
1664 vcc->pop(vcc, skb);
1665 }
1666 else {
1667 dev_kfree_skb_any(skb);
1668 }
1669
1670 if (tx_copy)
1671 kfree(data);
1672
1673 return -ENOBUFS;
1674 }
1675 }
1676
1677 entry->incarn = vc_map->incarn;
1678 entry->vc_map = vc_map;
1679 entry->skb = skb;
1680 entry->data = tx_copy ? data : NULL;
1681
1682 tpd = entry->tpd;
1683 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1684 tpd->tsd[ 0 ].length = tx_len;
1685
1686 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1687 txq->txing++;
1688
1689
1690
1691
1692
1693 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1694 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1695 tpd->tsd[0].length, skb_len);
1696
1697 if (skb_len < fore200e_vcc->tx_min_pdu)
1698 fore200e_vcc->tx_min_pdu = skb_len;
1699 if (skb_len > fore200e_vcc->tx_max_pdu)
1700 fore200e_vcc->tx_max_pdu = skb_len;
1701 fore200e_vcc->tx_pdu++;
1702
1703
1704 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1705 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1706
1707 if (cell_header) {
1708 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1709 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1710 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1711 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1712 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1713 }
1714 else {
1715
1716 tpd->atm_header.clp = 0;
1717 tpd->atm_header.plt = 0;
1718 tpd->atm_header.vci = vcc->vci;
1719 tpd->atm_header.vpi = vcc->vpi;
1720 tpd->atm_header.gfc = 0;
1721 }
1722
1723 tpd->spec.length = tx_len;
1724 tpd->spec.nseg = 1;
1725 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1726 tpd->spec.intr = 1;
1727
1728 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);
1729 tpd_haddr.pad = 0;
1730 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;
1731
1732 *entry->status = STATUS_PENDING;
1733 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1734
1735 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1736
1737 return 0;
1738}
1739
1740
1741static int
1742fore200e_getstats(struct fore200e* fore200e)
1743{
1744 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1745 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1746 struct stats_opcode opcode;
1747 int ok;
1748 u32 stats_dma_addr;
1749
1750 if (fore200e->stats == NULL) {
1751 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1752 if (fore200e->stats == NULL)
1753 return -ENOMEM;
1754 }
1755
1756 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1757 sizeof(struct stats), DMA_FROM_DEVICE);
1758
1759 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1760
1761 opcode.opcode = OPCODE_GET_STATS;
1762 opcode.pad = 0;
1763
1764 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1765
1766 *entry->status = STATUS_PENDING;
1767
1768 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1769
1770 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1771
1772 *entry->status = STATUS_FREE;
1773
1774 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1775
1776 if (ok == 0) {
1777 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1778 return -EIO;
1779 }
1780
1781 return 0;
1782}
1783
1784
1785static int
1786fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1787{
1788
1789
1790 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1791 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1792
1793 return -EINVAL;
1794}
1795
1796
1797static int
1798fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1799{
1800
1801
1802 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1803 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1804
1805 return -EINVAL;
1806}
1807
1808
1809#if 0
1810static int
1811fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1812{
1813 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1814 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1815 struct oc3_opcode opcode;
1816 int ok;
1817 u32 oc3_regs_dma_addr;
1818
1819 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1820
1821 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1822
1823 opcode.opcode = OPCODE_GET_OC3;
1824 opcode.reg = 0;
1825 opcode.value = 0;
1826 opcode.mask = 0;
1827
1828 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1829
1830 *entry->status = STATUS_PENDING;
1831
1832 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1833
1834 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1835
1836 *entry->status = STATUS_FREE;
1837
1838 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1839
1840 if (ok == 0) {
1841 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1842 return -EIO;
1843 }
1844
1845 return 0;
1846}
1847#endif
1848
1849
1850static int
1851fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1852{
1853 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1854 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1855 struct oc3_opcode opcode;
1856 int ok;
1857
1858 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1859
1860 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1861
1862 opcode.opcode = OPCODE_SET_OC3;
1863 opcode.reg = reg;
1864 opcode.value = value;
1865 opcode.mask = mask;
1866
1867 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1868
1869 *entry->status = STATUS_PENDING;
1870
1871 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1872
1873 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1874
1875 *entry->status = STATUS_FREE;
1876
1877 if (ok == 0) {
1878 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1879 return -EIO;
1880 }
1881
1882 return 0;
1883}
1884
1885
1886static int
1887fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1888{
1889 u32 mct_value, mct_mask;
1890 int error;
1891
1892 if (!capable(CAP_NET_ADMIN))
1893 return -EPERM;
1894
1895 switch (loop_mode) {
1896
1897 case ATM_LM_NONE:
1898 mct_value = 0;
1899 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1900 break;
1901
1902 case ATM_LM_LOC_PHY:
1903 mct_value = mct_mask = SUNI_MCT_DLE;
1904 break;
1905
1906 case ATM_LM_RMT_PHY:
1907 mct_value = mct_mask = SUNI_MCT_LLE;
1908 break;
1909
1910 default:
1911 return -EINVAL;
1912 }
1913
1914 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1915 if (error == 0)
1916 fore200e->loop_mode = loop_mode;
1917
1918 return error;
1919}
1920
1921
1922static int
1923fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1924{
1925 struct sonet_stats tmp;
1926
1927 if (fore200e_getstats(fore200e) < 0)
1928 return -EIO;
1929
1930 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1931 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1932 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1933 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1934 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1935 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1936 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1937 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1938 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1939 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1940 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1941 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1942 be32_to_cpu(fore200e->stats->aal5.cells_received);
1943
1944 if (arg)
1945 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1946
1947 return 0;
1948}
1949
1950
1951static int
1952fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1953{
1954 struct fore200e* fore200e = FORE200E_DEV(dev);
1955
1956 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1957
1958 switch (cmd) {
1959
1960 case SONET_GETSTAT:
1961 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1962
1963 case SONET_GETDIAG:
1964 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1965
1966 case ATM_SETLOOP:
1967 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1968
1969 case ATM_GETLOOP:
1970 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1971
1972 case ATM_QUERYLOOP:
1973 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1974 }
1975
1976 return -ENOSYS;
1977}
1978
1979
1980static int
1981fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1982{
1983 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1984 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1985
1986 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1988 return -EINVAL;
1989 }
1990
1991 DPRINTK(2, "change_qos %d.%d.%d, "
1992 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1993 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1994 "available_cell_rate = %u",
1995 vcc->itf, vcc->vpi, vcc->vci,
1996 fore200e_traffic_class[ qos->txtp.traffic_class ],
1997 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1998 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1999 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2000 flags, fore200e->available_cell_rate);
2001
2002 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2003
2004 mutex_lock(&fore200e->rate_mtx);
2005 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2006 mutex_unlock(&fore200e->rate_mtx);
2007 return -EAGAIN;
2008 }
2009
2010 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2011 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2012
2013 mutex_unlock(&fore200e->rate_mtx);
2014
2015 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2016
2017
2018 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2019
2020 set_bit(ATM_VF_HASQOS, &vcc->flags);
2021
2022 return 0;
2023 }
2024
2025 return -EINVAL;
2026}
2027
2028
2029static int fore200e_irq_request(struct fore200e *fore200e)
2030{
2031 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2032
2033 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2034 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2035 return -EBUSY;
2036 }
2037
2038 printk(FORE200E "IRQ %s reserved for device %s\n",
2039 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2040
2041#ifdef FORE200E_USE_TASKLET
2042 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2043 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2044#endif
2045
2046 fore200e->state = FORE200E_STATE_IRQ;
2047 return 0;
2048}
2049
2050
2051static int fore200e_get_esi(struct fore200e *fore200e)
2052{
2053 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2054 int ok, i;
2055
2056 if (!prom)
2057 return -ENOMEM;
2058
2059 ok = fore200e->bus->prom_read(fore200e, prom);
2060 if (ok < 0) {
2061 kfree(prom);
2062 return -EBUSY;
2063 }
2064
2065 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
2066 fore200e->name,
2067 (prom->hw_revision & 0xFF) + '@',
2068 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2069
2070 for (i = 0; i < ESI_LEN; i++) {
2071 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2072 }
2073
2074 kfree(prom);
2075
2076 return 0;
2077}
2078
2079
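/* allocate the host receive buffers for every scheme/magnitude pair and
   chain them onto the free list of their buffer supply queue */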
2080static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
2081{
2082 int scheme, magn, nbr, size, i;
2083
2084 struct host_bsq* bsq;
2085 struct buffer* buffer;
2086
2087 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2088 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2089
2090 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2091
2092 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2093 size = fore200e_rx_buf_size[ scheme ][ magn ];
2094
2095 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2096
2097
	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer), GFP_KERNEL);
2099
2100 if (buffer == NULL)
2101 return -ENOMEM;
2102
2103 bsq->freebuf = NULL;
2104
2105 for (i = 0; i < nbr; i++) {
2106
2107 buffer[ i ].scheme = scheme;
2108 buffer[ i ].magn = magn;
2109#ifdef FORE200E_BSQ_DEBUG
2110 buffer[ i ].index = i;
2111 buffer[ i ].supplied = 0;
2112#endif
2113
2114
2115 if (fore200e_chunk_alloc(fore200e,
2116 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2117 DMA_FROM_DEVICE) < 0) {
2118
2119 while (i > 0)
2120 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2121 kfree(buffer);
2122
2123 return -ENOMEM;
2124 }
2125
2126
2127 buffer[ i ].next = bsq->freebuf;
2128 bsq->freebuf = &buffer[ i ];
2129 }
2130
2131 bsq->freebuf_count = nbr;
2132
2133#ifdef FORE200E_BSQ_DEBUG
2134 bsq_audit(3, bsq, scheme, magn);
2135#endif
2136 }
2137 }
2138
2139 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2140 return 0;
2141}
2142
2143
2144static int fore200e_init_bs_queue(struct fore200e *fore200e)
2145{
2146 int scheme, magn, i;
2147
2148 struct host_bsq* bsq;
2149 struct cp_bsq_entry __iomem * cp_entry;
2150
2151 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2152 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2153
2154 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2155
2156 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2157
2158
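			/* allocate the status words and receive buffer descriptor blocks
			   that this supply queue shares with the cp over DMA */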
2159 if (fore200e->bus->dma_chunk_alloc(fore200e,
2160 &bsq->status,
2161 sizeof(enum status),
2162 QUEUE_SIZE_BS,
2163 fore200e->bus->status_alignment) < 0) {
2164 return -ENOMEM;
2165 }
2166
2167
2168 if (fore200e->bus->dma_chunk_alloc(fore200e,
2169 &bsq->rbd_block,
2170 sizeof(struct rbd_block),
2171 QUEUE_SIZE_BS,
2172 fore200e->bus->descr_alignment) < 0) {
2173
2174 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2175 return -ENOMEM;
2176 }
2177
2178
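			/* the cp exports the offset of this supply queue within the
			   board's mapped address space; turn it into a host pointer */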
2179 cp_entry = fore200e->virt_base +
2180 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2181
2182
2183 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2184
2185 bsq->host_entry[ i ].status =
2186 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2187 bsq->host_entry[ i ].rbd_block =
2188 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2189 bsq->host_entry[ i ].rbd_block_dma =
2190 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2191 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2192
2193 *bsq->host_entry[ i ].status = STATUS_FREE;
2194
2195 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2196 &cp_entry[ i ].status_haddr);
2197 }
2198 }
2199 }
2200
2201 fore200e->state = FORE200E_STATE_INIT_BSQ;
2202 return 0;
2203}
2204
2205
2206static int fore200e_init_rx_queue(struct fore200e *fore200e)
2207{
2208 struct host_rxq* rxq = &fore200e->host_rxq;
2209 struct cp_rxq_entry __iomem * cp_entry;
2210 int i;
2211
2212 DPRINTK(2, "receive queue is being initialized\n");
2213
2214
2215 if (fore200e->bus->dma_chunk_alloc(fore200e,
2216 &rxq->status,
2217 sizeof(enum status),
2218 QUEUE_SIZE_RX,
2219 fore200e->bus->status_alignment) < 0) {
2220 return -ENOMEM;
2221 }
2222
2223
2224 if (fore200e->bus->dma_chunk_alloc(fore200e,
2225 &rxq->rpd,
2226 sizeof(struct rpd),
2227 QUEUE_SIZE_RX,
2228 fore200e->bus->descr_alignment) < 0) {
2229
2230 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2231 return -ENOMEM;
2232 }
2233
2234
2235 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2236
2237
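	/* hand the cp the DMA address of the status word and of the receive PDU
	   descriptor of every ring entry, and mark each entry free on the host side */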
2238 for (i=0; i < QUEUE_SIZE_RX; i++) {
2239
2240 rxq->host_entry[ i ].status =
2241 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2242 rxq->host_entry[ i ].rpd =
2243 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2244 rxq->host_entry[ i ].rpd_dma =
2245 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2246 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2247
2248 *rxq->host_entry[ i ].status = STATUS_FREE;
2249
2250 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2251 &cp_entry[ i ].status_haddr);
2252
2253 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2254 &cp_entry[ i ].rpd_haddr);
2255 }
2256
2257
2258 rxq->head = 0;
2259
2260 fore200e->state = FORE200E_STATE_INIT_RXQ;
2261 return 0;
2262}
2263
2264
2265static int fore200e_init_tx_queue(struct fore200e *fore200e)
2266{
2267 struct host_txq* txq = &fore200e->host_txq;
2268 struct cp_txq_entry __iomem * cp_entry;
2269 int i;
2270
2271 DPRINTK(2, "transmit queue is being initialized\n");
2272
2273
2274 if (fore200e->bus->dma_chunk_alloc(fore200e,
2275 &txq->status,
2276 sizeof(enum status),
2277 QUEUE_SIZE_TX,
2278 fore200e->bus->status_alignment) < 0) {
2279 return -ENOMEM;
2280 }
2281
2282
2283 if (fore200e->bus->dma_chunk_alloc(fore200e,
2284 &txq->tpd,
2285 sizeof(struct tpd),
2286 QUEUE_SIZE_TX,
2287 fore200e->bus->descr_alignment) < 0) {
2288
2289 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2290 return -ENOMEM;
2291 }
2292
2293
2294 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2295
2296
2297 for (i=0; i < QUEUE_SIZE_TX; i++) {
2298
2299 txq->host_entry[ i ].status =
2300 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2301 txq->host_entry[ i ].tpd =
2302 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2303 txq->host_entry[ i ].tpd_dma =
2304 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2305 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2306
2307 *txq->host_entry[ i ].status = STATUS_FREE;
2308
2309 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2310 &cp_entry[ i ].status_haddr);
2311
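		/* note that, unlike the rx queue, no descriptor address is preloaded
		   into the cp entry here: the tpd DMA address is presumably written
		   into the entry whenever a transmit is actually posted */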
2312
2313
2314
2315
2316 }
2317
2318
2319 txq->head = 0;
2320 txq->tail = 0;
2321
2322 fore200e->state = FORE200E_STATE_INIT_TXQ;
2323 return 0;
2324}
2325
2326
2327static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2328{
2329 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2330 struct cp_cmdq_entry __iomem * cp_entry;
2331 int i;
2332
2333 DPRINTK(2, "command queue is being initialized\n");
2334
2335
2336 if (fore200e->bus->dma_chunk_alloc(fore200e,
2337 &cmdq->status,
2338 sizeof(enum status),
2339 QUEUE_SIZE_CMD,
2340 fore200e->bus->status_alignment) < 0) {
2341 return -ENOMEM;
2342 }
2343
2344
2345 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2346
2347
2348 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2349
2350 cmdq->host_entry[ i ].status =
2351 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2352 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2353
2354 *cmdq->host_entry[ i ].status = STATUS_FREE;
2355
2356 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2357 &cp_entry[ i ].status_haddr);
2358 }
2359
2360
2361 cmdq->head = 0;
2362
2363 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2364 return 0;
2365}
2366
2367
2368static void fore200e_param_bs_queue(struct fore200e *fore200e,
2369 enum buffer_scheme scheme,
2370 enum buffer_magn magn, int queue_length,
2371 int pool_size, int supply_blksize)
2372{
2373 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2374
2375 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2376 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2377 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2378 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2379}
2380
2381
2382static int fore200e_initialize(struct fore200e *fore200e)
2383{
2384 struct cp_queues __iomem * cpq;
2385 int ok, scheme, magn;
2386
2387 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2388
2389 mutex_init(&fore200e->rate_mtx);
2390 spin_lock_init(&fore200e->q_lock);
2391
2392 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2393
2394
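	/* writing 1 to imask is understood to enable interrupts from the cp to
	   the host; the precise semantics are defined by the firmware */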
2395 fore200e->bus->write(1, &cpq->imask);
2396
2397 if (fore200e->bus->irq_enable)
2398 fore200e->bus->irq_enable(fore200e);
2399
2400 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2401
2402 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2403 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2404 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2405
2406 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2407 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2408
2409 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2410 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2411 fore200e_param_bs_queue(fore200e, scheme, magn,
2412 QUEUE_SIZE_BS,
2413 fore200e_rx_buf_nbr[ scheme ][ magn ],
2414 RBD_BLK_SIZE);
2415
2416
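	/* kick off the initialization handshake: mark the operation as pending,
	   issue the INITIALIZE opcode, then poll until the cp reports completion */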
2417 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2418 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2419
2420 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2421 if (ok == 0) {
2422 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2423 return -ENODEV;
2424 }
2425
2426 printk(FORE200E "device %s initialized\n", fore200e->name);
2427
2428 fore200e->state = FORE200E_STATE_INITIALIZE;
2429 return 0;
2430}
2431
2432
2433static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2434{
2435 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2436
2437#if 0
2438 printk("%c", c);
2439#endif
2440 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2441}
2442
2443
2444static int fore200e_monitor_getc(struct fore200e *fore200e)
2445{
2446 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2447 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2448 int c;
2449
2450 while (time_before(jiffies, timeout)) {
2451
2452 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2453
2454 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2455
2456 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2457#if 0
2458 printk("%c", c & 0xFF);
2459#endif
2460 return c & 0xFF;
2461 }
2462 }
2463
2464 return -1;
2465}
2466
2467
2468static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2469{
2470 while (*str) {
2471
2472
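		/* drain whatever the monitor has sent back (presumably the echo of
		   previously transmitted characters) before sending the next one */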
2473 while (fore200e_monitor_getc(fore200e) >= 0);
2474
2475 fore200e_monitor_putc(fore200e, *str++);
2476 }
2477
2478 while (fore200e_monitor_getc(fore200e) >= 0);
2479}
2480
2481#ifdef __LITTLE_ENDIAN
2482#define FW_EXT ".bin"
2483#else
2484#define FW_EXT "_ecd.bin2"
2485#endif
2486
2487static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2488{
2489 const struct firmware *firmware;
2490 struct device *device;
2491 struct fw_header *fw_header;
2492 const __le32 *fw_data;
2493 u32 fw_size;
2494 u32 __iomem *load_addr;
2495 char buf[48];
2496 int err = -ENODEV;
2497
2498 if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2499 device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2500#ifdef CONFIG_SBUS
2501 else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2502 device = &((struct platform_device *) fore200e->bus_dev)->dev;
2503#endif
2504 else
2505 return err;
2506
2507 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2508 if ((err = request_firmware(&firmware, buf, device)) < 0) {
		printk(FORE200E "problem loading firmware image %s\n", buf);
2510 return err;
2511 }
2512
2513 fw_data = (__le32 *) firmware->data;
2514 fw_size = firmware->size / sizeof(u32);
2515 fw_header = (struct fw_header *) firmware->data;
2516 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2517
2518 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2519 fore200e->name, load_addr, fw_size);
2520
2521 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2522 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2523 goto release;
2524 }
2525
2526 for (; fw_size--; fw_data++, load_addr++)
2527 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2528
2529 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2530
2531#if defined(__sparc_v9__)
2532
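	/* short pause before driving the monitor; this delay is apparently needed
	   on sparc64 (SBA-200E) systems */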
2533 fore200e_spin(100);
2534#endif
2535
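	/* the firmware is started by sending a "go <start_offset>" command to the
	   on-board boot monitor over the soft UART, then waiting for the cp to
	   report that it is running */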
2536 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2537 fore200e_monitor_puts(fore200e, buf);
2538
2539 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2540 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2541 goto release;
2542 }
2543
2544 printk(FORE200E "device %s firmware started\n", fore200e->name);
2545
2546 fore200e->state = FORE200E_STATE_START_FW;
2547 err = 0;
2548
2549release:
2550 release_firmware(firmware);
2551 return err;
2552}
2553
2554
2555static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2556{
2557 struct atm_dev* atm_dev;
2558
2559 DPRINTK(2, "device %s being registered\n", fore200e->name);
2560
2561 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2562 -1, NULL);
2563 if (atm_dev == NULL) {
2564 printk(FORE200E "unable to register device %s\n", fore200e->name);
2565 return -ENODEV;
2566 }
2567
2568 atm_dev->dev_data = fore200e;
2569 fore200e->atm_dev = atm_dev;
2570
2571 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2572 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2573
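	/* the full OC-3 cell rate is initially available; CBR reservations are
	   debited from and credited back to this pool (see fore200e_change_qos) */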
2574 fore200e->available_cell_rate = ATM_OC3_PCR;
2575
2576 fore200e->state = FORE200E_STATE_REGISTER;
2577 return 0;
2578}
2579
2580
2581static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2582{
2583 if (fore200e_register(fore200e, parent) < 0)
2584 return -ENODEV;
2585
2586 if (fore200e->bus->configure(fore200e) < 0)
2587 return -ENODEV;
2588
2589 if (fore200e->bus->map(fore200e) < 0)
2590 return -ENODEV;
2591
2592 if (fore200e_reset(fore200e, 1) < 0)
2593 return -ENODEV;
2594
2595 if (fore200e_load_and_start_fw(fore200e) < 0)
2596 return -ENODEV;
2597
2598 if (fore200e_initialize(fore200e) < 0)
2599 return -ENODEV;
2600
2601 if (fore200e_init_cmd_queue(fore200e) < 0)
2602 return -ENOMEM;
2603
2604 if (fore200e_init_tx_queue(fore200e) < 0)
2605 return -ENOMEM;
2606
2607 if (fore200e_init_rx_queue(fore200e) < 0)
2608 return -ENOMEM;
2609
2610 if (fore200e_init_bs_queue(fore200e) < 0)
2611 return -ENOMEM;
2612
2613 if (fore200e_alloc_rx_buf(fore200e) < 0)
2614 return -ENOMEM;
2615
2616 if (fore200e_get_esi(fore200e) < 0)
2617 return -EIO;
2618
2619 if (fore200e_irq_request(fore200e) < 0)
2620 return -EBUSY;
2621
2622 fore200e_supply(fore200e);
2623
2624
2625 fore200e->state = FORE200E_STATE_COMPLETE;
2626 return 0;
2627}
2628
2629#ifdef CONFIG_SBUS
2630static const struct of_device_id fore200e_sba_match[];
2631static int fore200e_sba_probe(struct platform_device *op)
2632{
2633 const struct of_device_id *match;
2634 const struct fore200e_bus *bus;
2635 struct fore200e *fore200e;
2636 static int index = 0;
2637 int err;
2638
2639 match = of_match_device(fore200e_sba_match, &op->dev);
2640 if (!match)
2641 return -EINVAL;
2642 bus = match->data;
2643
2644 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2645 if (!fore200e)
2646 return -ENOMEM;
2647
2648 fore200e->bus = bus;
2649 fore200e->bus_dev = op;
2650 fore200e->irq = op->archdata.irqs[0];
2651 fore200e->phys_base = op->resource[0].start;
2652
2653 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2654
2655 err = fore200e_init(fore200e, &op->dev);
2656 if (err < 0) {
2657 fore200e_shutdown(fore200e);
2658 kfree(fore200e);
2659 return err;
2660 }
2661
2662 index++;
2663 dev_set_drvdata(&op->dev, fore200e);
2664
2665 return 0;
2666}
2667
2668static int fore200e_sba_remove(struct platform_device *op)
2669{
2670 struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2671
2672 fore200e_shutdown(fore200e);
2673 kfree(fore200e);
2674
2675 return 0;
2676}
2677
2678static const struct of_device_id fore200e_sba_match[] = {
2679 {
2680 .name = SBA200E_PROM_NAME,
2681 .data = (void *) &fore200e_bus[1],
2682 },
2683 {},
2684};
2685MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2686
2687static struct platform_driver fore200e_sba_driver = {
2688 .driver = {
2689 .name = "fore_200e",
2690 .owner = THIS_MODULE,
2691 .of_match_table = fore200e_sba_match,
2692 },
2693 .probe = fore200e_sba_probe,
2694 .remove = fore200e_sba_remove,
2695};
2696#endif
2697
2698#ifdef CONFIG_PCI
2699static int fore200e_pca_detect(struct pci_dev *pci_dev,
2700 const struct pci_device_id *pci_ent)
2701{
2702 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2703 struct fore200e* fore200e;
2704 int err = 0;
2705 static int index = 0;
2706
2707 if (pci_enable_device(pci_dev)) {
2708 err = -EINVAL;
2709 goto out;
2710 }
2711
2712 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2713 if (fore200e == NULL) {
2714 err = -ENOMEM;
2715 goto out_disable;
2716 }
2717
2718 fore200e->bus = bus;
2719 fore200e->bus_dev = pci_dev;
2720 fore200e->irq = pci_dev->irq;
2721 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2722
2725 pci_set_master(pci_dev);
2726
2727 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2728 fore200e->bus->model_name,
2729 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2730
2731 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2732
2733 err = fore200e_init(fore200e, &pci_dev->dev);
2734 if (err < 0) {
2735 fore200e_shutdown(fore200e);
2736 goto out_free;
2737 }
2738
2739 ++index;
2740 pci_set_drvdata(pci_dev, fore200e);
2741
2742out:
2743 return err;
2744
2745out_free:
2746 kfree(fore200e);
2747out_disable:
2748 pci_disable_device(pci_dev);
2749 goto out;
2750}
2751
2752
2753static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2754{
2755 struct fore200e *fore200e;
2756
2757 fore200e = pci_get_drvdata(pci_dev);
2758
2759 fore200e_shutdown(fore200e);
2760 kfree(fore200e);
2761 pci_disable_device(pci_dev);
2762}
2763
2764
2765static struct pci_device_id fore200e_pca_tbl[] = {
2766 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2767 0, 0, (unsigned long) &fore200e_bus[0] },
2768 { 0, }
2769};
2770
2771MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2772
2773static struct pci_driver fore200e_pca_driver = {
2774 .name = "fore_200e",
2775 .probe = fore200e_pca_detect,
2776 .remove = fore200e_pca_remove_one,
2777 .id_table = fore200e_pca_tbl,
2778};
2779#endif
2780
2781static int __init fore200e_module_init(void)
2782{
2783 int err = 0;
2784
2785 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2786
2787#ifdef CONFIG_SBUS
2788 err = platform_driver_register(&fore200e_sba_driver);
2789 if (err)
2790 return err;
2791#endif
2792
2793#ifdef CONFIG_PCI
2794 err = pci_register_driver(&fore200e_pca_driver);
2795#endif
2796
2797#ifdef CONFIG_SBUS
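	/* if PCI registration failed (err stays 0 when CONFIG_PCI is not set),
	   back out the SBUS registration done above */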
2798 if (err)
2799 platform_driver_unregister(&fore200e_sba_driver);
2800#endif
2801
2802 return err;
2803}
2804
2805static void __exit fore200e_module_cleanup(void)
2806{
2807#ifdef CONFIG_PCI
2808 pci_unregister_driver(&fore200e_pca_driver);
2809#endif
2810#ifdef CONFIG_SBUS
2811 platform_driver_unregister(&fore200e_sba_driver);
2812#endif
2813}
2814
2815static int
2816fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2817{
2818 struct fore200e* fore200e = FORE200E_DEV(dev);
2819 struct fore200e_vcc* fore200e_vcc;
2820 struct atm_vcc* vcc;
2821 int i, len, left = *pos;
2822 unsigned long flags;
2823
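	/* the ATM proc layer calls this handler repeatedly with an increasing
	   *pos; each "if (!left--)" block below emits exactly one chunk of output */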
2824 if (!left--) {
2825
2826 if (fore200e_getstats(fore200e) < 0)
2827 return -EIO;
2828
2829 len = sprintf(page,"\n"
2830 " device:\n"
2831 " internal name:\t\t%s\n", fore200e->name);
2832
2833
2834 if (fore200e->bus->proc_read)
2835 len += fore200e->bus->proc_read(fore200e, page + len);
2836
2837 len += sprintf(page + len,
2838 " interrupt line:\t\t%s\n"
2839 " physical base address:\t0x%p\n"
2840 " virtual base address:\t0x%p\n"
2841 " factory address (ESI):\t%pM\n"
2842 " board serial number:\t\t%d\n\n",
2843 fore200e_irq_itoa(fore200e->irq),
2844 (void*)fore200e->phys_base,
2845 fore200e->virt_base,
2846 fore200e->esi,
2847 fore200e->esi[4] * 256 + fore200e->esi[5]);
2848
2849 return len;
2850 }
2851
2852 if (!left--)
2853 return sprintf(page,
2854 " free small bufs, scheme 1:\t%d\n"
2855 " free large bufs, scheme 1:\t%d\n"
2856 " free small bufs, scheme 2:\t%d\n"
2857 " free large bufs, scheme 2:\t%d\n",
2858 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2859 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2860 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2861 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2862
2863 if (!left--) {
2864 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2865
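		/* a heartbeat word of the form 0xDEADxxxx signals a firmware fatal
		   error, with the error code in the low 16 bits */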
2866 len = sprintf(page,"\n\n"
2867 " cell processor:\n"
2868 " heartbeat state:\t\t");
2869
2870 if (hb >> 16 != 0xDEAD)
2871 len += sprintf(page + len, "0x%08x\n", hb);
2872 else
2873 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2874
2875 return len;
2876 }
2877
2878 if (!left--) {
2879 static const char* media_name[] = {
2880 "unshielded twisted pair",
2881 "multimode optical fiber ST",
2882 "multimode optical fiber SC",
2883 "single-mode optical fiber ST",
2884 "single-mode optical fiber SC",
2885 "unknown"
2886 };
2887
2888 static const char* oc3_mode[] = {
2889 "normal operation",
2890 "diagnostic loopback",
2891 "line loopback",
2892 "unknown"
2893 };
2894
2895 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2896 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2897 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2898 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2899 u32 oc3_index;
2900
2901 if (media_index > 4)
2902 media_index = 5;
2903
2904 switch (fore200e->loop_mode) {
2905 case ATM_LM_NONE: oc3_index = 0;
2906 break;
2907 case ATM_LM_LOC_PHY: oc3_index = 1;
2908 break;
2909 case ATM_LM_RMT_PHY: oc3_index = 2;
2910 break;
2911 default: oc3_index = 3;
2912 }
2913
2914 return sprintf(page,
2915 " firmware release:\t\t%d.%d.%d\n"
2916 " monitor release:\t\t%d.%d\n"
2917 " media type:\t\t\t%s\n"
2918 " OC-3 revision:\t\t0x%x\n"
2919 " OC-3 mode:\t\t\t%s",
			       fw_release >> 16, (fw_release >> 8) & 0xFF, fw_release & 0xFF,
			       mon960_release >> 16, mon960_release & 0xFFFF,
2922 media_name[ media_index ],
2923 oc3_revision,
2924 oc3_mode[ oc3_index ]);
2925 }
2926
2927 if (!left--) {
2928 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2929
2930 return sprintf(page,
2931 "\n\n"
2932 " monitor:\n"
2933 " version number:\t\t%d\n"
2934 " boot status word:\t\t0x%08x\n",
2935 fore200e->bus->read(&cp_monitor->mon_version),
2936 fore200e->bus->read(&cp_monitor->bstat));
2937 }
2938
2939 if (!left--)
2940 return sprintf(page,
2941 "\n"
2942 " device statistics:\n"
2943 " 4b5b:\n"
2944 " crc_header_errors:\t\t%10u\n"
2945 " framing_errors:\t\t%10u\n",
2946 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2947 be32_to_cpu(fore200e->stats->phy.framing_errors));
2948
2949 if (!left--)
2950 return sprintf(page, "\n"
2951 " OC-3:\n"
2952 " section_bip8_errors:\t%10u\n"
2953 " path_bip8_errors:\t\t%10u\n"
2954 " line_bip24_errors:\t\t%10u\n"
2955 " line_febe_errors:\t\t%10u\n"
2956 " path_febe_errors:\t\t%10u\n"
2957 " corr_hcs_errors:\t\t%10u\n"
2958 " ucorr_hcs_errors:\t\t%10u\n",
2959 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2960 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2961 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2962 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2963 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2964 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2965 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2966
2967 if (!left--)
2968 return sprintf(page,"\n"
2969 " ATM:\t\t\t\t cells\n"
2970 " TX:\t\t\t%10u\n"
2971 " RX:\t\t\t%10u\n"
2972 " vpi out of range:\t\t%10u\n"
2973 " vpi no conn:\t\t%10u\n"
2974 " vci out of range:\t\t%10u\n"
2975 " vci no conn:\t\t%10u\n",
2976 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2977 be32_to_cpu(fore200e->stats->atm.cells_received),
2978 be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2979 be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2980 be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2981 be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2982
2983 if (!left--)
2984 return sprintf(page,"\n"
2985 " AAL0:\t\t\t cells\n"
2986 " TX:\t\t\t%10u\n"
2987 " RX:\t\t\t%10u\n"
2988 " dropped:\t\t\t%10u\n",
2989 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2990 be32_to_cpu(fore200e->stats->aal0.cells_received),
2991 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2992
2993 if (!left--)
2994 return sprintf(page,"\n"
2995 " AAL3/4:\n"
2996 " SAR sublayer:\t\t cells\n"
2997 " TX:\t\t\t%10u\n"
2998 " RX:\t\t\t%10u\n"
2999 " dropped:\t\t\t%10u\n"
3000 " CRC errors:\t\t%10u\n"
3001 " protocol errors:\t\t%10u\n\n"
3002 " CS sublayer:\t\t PDUs\n"
3003 " TX:\t\t\t%10u\n"
3004 " RX:\t\t\t%10u\n"
3005 " dropped:\t\t\t%10u\n"
3006 " protocol errors:\t\t%10u\n",
3007 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3008 be32_to_cpu(fore200e->stats->aal34.cells_received),
3009 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3010 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3011 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3012 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3013 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3014 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3015 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3016
3017 if (!left--)
3018 return sprintf(page,"\n"
3019 " AAL5:\n"
3020 " SAR sublayer:\t\t cells\n"
3021 " TX:\t\t\t%10u\n"
3022 " RX:\t\t\t%10u\n"
3023 " dropped:\t\t\t%10u\n"
3024 " congestions:\t\t%10u\n\n"
3025 " CS sublayer:\t\t PDUs\n"
3026 " TX:\t\t\t%10u\n"
3027 " RX:\t\t\t%10u\n"
3028 " dropped:\t\t\t%10u\n"
3029 " CRC errors:\t\t%10u\n"
3030 " protocol errors:\t\t%10u\n",
3031 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3032 be32_to_cpu(fore200e->stats->aal5.cells_received),
3033 be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3034 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3035 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3036 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3037 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3038 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3039 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3040
3041 if (!left--)
3042 return sprintf(page,"\n"
3043 " AUX:\t\t allocation failures\n"
3044 " small b1:\t\t\t%10u\n"
3045 " large b1:\t\t\t%10u\n"
3046 " small b2:\t\t\t%10u\n"
3047 " large b2:\t\t\t%10u\n"
3048 " RX PDUs:\t\t\t%10u\n"
3049 " TX PDUs:\t\t\t%10lu\n",
3050 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3051 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3052 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3053 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3054 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3055 fore200e->tx_sat);
3056
3057 if (!left--)
3058 return sprintf(page,"\n"
3059 " receive carrier:\t\t\t%s\n",
3060 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3061
3062 if (!left--) {
3063 return sprintf(page,"\n"
3064 " VCCs:\n address VPI VCI AAL "
3065 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3066 }
3067
3068 for (i = 0; i < NBR_CONNECT; i++) {
3069
3070 vcc = fore200e->vc_map[i].vcc;
3071
3072 if (vcc == NULL)
3073 continue;
3074
3075 spin_lock_irqsave(&fore200e->q_lock, flags);
3076
3077 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3078
3079 fore200e_vcc = FORE200E_VCC(vcc);
3080 ASSERT(fore200e_vcc);
3081
3082 len = sprintf(page,
3083 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3084 (u32)(unsigned long)vcc,
3085 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3086 fore200e_vcc->tx_pdu,
3087 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3088 fore200e_vcc->tx_max_pdu,
3089 fore200e_vcc->rx_pdu,
3090 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3091 fore200e_vcc->rx_max_pdu);
3092
3093 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3094 return len;
3095 }
3096
3097 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3098 }
3099
3100 return 0;
3101}
3102
3103module_init(fore200e_module_init);
3104module_exit(fore200e_module_cleanup);
3105
3106
3107static const struct atmdev_ops fore200e_ops =
3108{
3109 .open = fore200e_open,
3110 .close = fore200e_close,
3111 .ioctl = fore200e_ioctl,
3112 .getsockopt = fore200e_getsockopt,
3113 .setsockopt = fore200e_setsockopt,
3114 .send = fore200e_send,
3115 .change_qos = fore200e_change_qos,
3116 .proc_read = fore200e_proc_read,
3117 .owner = THIS_MODULE
3118};
3119
3120
3121static const struct fore200e_bus fore200e_bus[] = {
3122#ifdef CONFIG_PCI
3123 { "PCA-200E", "pca200e", 32, 4, 32,
3124 fore200e_pca_read,
3125 fore200e_pca_write,
3126 fore200e_pca_dma_map,
3127 fore200e_pca_dma_unmap,
3128 fore200e_pca_dma_sync_for_cpu,
3129 fore200e_pca_dma_sync_for_device,
3130 fore200e_pca_dma_chunk_alloc,
3131 fore200e_pca_dma_chunk_free,
3132 fore200e_pca_configure,
3133 fore200e_pca_map,
3134 fore200e_pca_reset,
3135 fore200e_pca_prom_read,
3136 fore200e_pca_unmap,
3137 NULL,
3138 fore200e_pca_irq_check,
3139 fore200e_pca_irq_ack,
3140 fore200e_pca_proc_read,
3141 },
3142#endif
3143#ifdef CONFIG_SBUS
3144 { "SBA-200E", "sba200e", 32, 64, 32,
3145 fore200e_sba_read,
3146 fore200e_sba_write,
3147 fore200e_sba_dma_map,
3148 fore200e_sba_dma_unmap,
3149 fore200e_sba_dma_sync_for_cpu,
3150 fore200e_sba_dma_sync_for_device,
3151 fore200e_sba_dma_chunk_alloc,
3152 fore200e_sba_dma_chunk_free,
3153 fore200e_sba_configure,
3154 fore200e_sba_map,
3155 fore200e_sba_reset,
3156 fore200e_sba_prom_read,
3157 fore200e_sba_unmap,
3158 fore200e_sba_irq_enable,
3159 fore200e_sba_irq_check,
3160 fore200e_sba_irq_ack,
3161 fore200e_sba_proc_read,
3162 },
3163#endif
3164 {}
3165};
3166
3167MODULE_LICENSE("GPL");
3168#ifdef CONFIG_PCI
#ifdef __LITTLE_ENDIAN
3170MODULE_FIRMWARE("pca200e.bin");
3171#else
3172MODULE_FIRMWARE("pca200e_ecd.bin2");
3173#endif
3174#endif
3175#ifdef CONFIG_SBUS
3176MODULE_FIRMWARE("sba200e_ecd.bin2");
3177#endif
3178