1/*
2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/init.h>
29#include <linux/capability.h>
30#include <linux/interrupt.h>
31#include <linux/bitops.h>
32#include <linux/pci.h>
33#include <linux/module.h>
34#include <linux/atmdev.h>
35#include <linux/sonet.h>
36#include <linux/atm_suni.h>
37#include <linux/dma-mapping.h>
38#include <linux/delay.h>
39#include <linux/firmware.h>
40#include <asm/io.h>
41#include <asm/string.h>
42#include <asm/page.h>
43#include <asm/irq.h>
44#include <asm/dma.h>
45#include <asm/byteorder.h>
46#include <linux/uaccess.h>
47#include <linux/atomic.h>
48
49#ifdef CONFIG_SBUS
50#include <linux/of.h>
51#include <linux/of_device.h>
52#include <asm/idprom.h>
53#include <asm/openprom.h>
54#include <asm/oplib.h>
55#include <asm/pgtable.h>
56#endif
57
58#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59#define FORE200E_USE_TASKLET
60#endif
61
62#if 0 /* enable the debugging code of the buffer supply queues */
63#define FORE200E_BSQ_DEBUG
64#endif
65
66#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67#define FORE200E_52BYTE_AAL0_SDU
68#endif
69
70#include "fore200e.h"
71#include "suni.h"
72
73#define FORE200E_VERSION "0.3e"
74
75#define FORE200E "fore200e: "
76
77#if 0 /* override .config */
78#define CONFIG_ATM_FORE200E_DEBUG 1
79#endif
80#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82 printk(FORE200E format, ##args); } while (0)
83#else
84#define DPRINTK(level, format, args...) do {} while (0)
85#endif
86
87
/* number of padding bytes needed to align 'addr' on an 'alignment' boundary
   (alignment must be a power of two) */
#define FORE200E_ALIGN(addr, alignment) \
        ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))

/* bus (DMA) address of the index-th element of an array of 'type' starting at 'dma_addr' */
#define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))

/* host virtual address of the index-th element of an array of 'type' starting at 'virt_addr' */
#define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])

/* advance a circular queue index, wrapping at 'modulo'; assigns and evaluates to the new index */
#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
96
#if 1
/* hard assertion: log the failed expression and panic the kernel.
   Wrapped in do { } while (0) so that "if (x) ASSERT(y); else ..."
   parses as intended (the original bare-braces form broke dangling-else
   association and required the trailing semicolon to be absorbed). */
#define ASSERT(expr)     do {                                            \
                             if (!(expr)) {                              \
                                 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
                                        __func__, __LINE__, #expr);      \
                                 panic(FORE200E "%s", __func__);         \
                             }                                           \
                         } while (0)
#else
#define ASSERT(expr)     do {} while (0)
#endif
106
107
108static const struct atmdev_ops fore200e_ops;
109
110static LIST_HEAD(fore200e_boards);
111
112
113MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
114MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
115MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
116
117
/* number of rx buffers allocated per [ buffer scheme ][ buffer magnitude ];
   values come from fore200e.h */
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_NBR, BUFFER_L1_NBR },
    { BUFFER_S2_NBR, BUFFER_L2_NBR }
};

/* size in bytes of an rx buffer, per [ buffer scheme ][ buffer magnitude ] */
static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};
127
128
129#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
130static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
131#endif
132
133
#if 0 /* currently unused */
/* map a firmware AAL encoding back to the Linux ATM AAL identifier;
   returns -EINVAL for unknown values.  Dead code kept for reference. */
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
    switch(aal) {
    case FORE200E_AAL0:  return ATM_AAL0;
    case FORE200E_AAL34: return ATM_AAL34;
    case FORE200E_AAL5:  return ATM_AAL5;
    }

    return -EINVAL;
}
#endif
147
148
/* map a Linux ATM AAL identifier onto the firmware's AAL encoding.
   AAL1 and AAL2 are deliberately carried as AAL5 by this adapter.
   Unsupported AALs yield -EINVAL cast to the enum type -- presumably
   callers compare the result against -EINVAL; verify at call sites. */
static enum fore200e_aal
fore200e_atm2fore_aal(int aal)
{
    switch(aal) {
    case ATM_AAL0:  return FORE200E_AAL0;
    case ATM_AAL34: return FORE200E_AAL34;
    case ATM_AAL1:
    case ATM_AAL2:
    case ATM_AAL5:  return FORE200E_AAL5;
    }

    return -EINVAL;
}
162
163
/* format an IRQ number as a decimal string.
   Returns a pointer to a static scratch buffer: not reentrant, which is
   acceptable for the printk-time uses in this driver.  The buffer was
   previously 8 bytes and filled with sprintf(), which could overflow for
   large (or negative) int values; use snprintf() with a buffer sized for
   any 32-bit value including sign and terminator. */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[16];
    snprintf(str, sizeof(str), "%d", irq);
    return str;
}
171
172
/* allocate and align a chunk of memory intended to hold the data being exchanged
   between the driver and the adapter (using streaming DVMA) */
175
176static int
177fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
178{
179 unsigned long offset = 0;
180
181 if (alignment <= sizeof(int))
182 alignment = 0;
183
184 chunk->alloc_size = size + alignment;
185 chunk->direction = direction;
186
187 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
188 if (chunk->alloc_addr == NULL)
189 return -ENOMEM;
190
191 if (alignment > 0)
192 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
193
194 chunk->align_addr = chunk->alloc_addr + offset;
195
196 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
197 size, direction);
198 if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
199 kfree(chunk->alloc_addr);
200 return -ENOMEM;
201 }
202 return 0;
203}
204
205
/* free a chunk of memory: tear down the streaming DMA mapping created by
   fore200e_chunk_alloc(), then release the backing kernel allocation.
   NOTE(review): this unmaps chunk->dma_size bytes -- confirm the alloc
   path fills that field in before the first free. */
static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
		     chunk->direction);
    kfree(chunk->alloc_addr);
}
215
/*
 * Allocate a DMA consistent chunk of memory intended to act as a communication
 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 * and the adapter.  The area holds 'nbr' elements of 'size' bytes each.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
			 int size, int nbr, int alignment)
{
    /* returned chunks are page-aligned */
    /* 'alignment' is intentionally unused: dma_alloc_coherent() hands back
       page-aligned memory, which satisfies any alignment the bus ops request */
    chunk->alloc_size = size * nbr;
    chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
					   &chunk->dma_addr, GFP_KERNEL);
    if (!chunk->alloc_addr)
	return -ENOMEM;
    chunk->align_addr = chunk->alloc_addr;
    return 0;
}
234
/*
 * Free a DMA consistent chunk of memory previously obtained from
 * fore200e_dma_chunk_alloc().
 */
static void
fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
		      chunk->dma_addr);
}
244
245static void
246fore200e_spin(int msecs)
247{
248 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
249 while (time_before(jiffies, timeout));
250}
251
252
/* busy-poll a host-memory status word (updated by the adapter via DMA)
   until it equals 'val', an error bit appears, or 'msecs' elapse.
   Returns non-zero iff the expected value was observed. */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    mb();   /* make sure any prior queue-entry writes are visible before polling */
    do {
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }
#endif

    return ok;
}
275
276
/* busy-poll a board register (through the bus read accessor) until it
   equals 'val' or 'msecs' elapse.  Unlike fore200e_poll() there is no
   error-bit shortcut here.  Returns non-zero iff the value was seen. */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}
298
299
300static void
301fore200e_free_rx_buf(struct fore200e* fore200e)
302{
303 int scheme, magn, nbr;
304 struct buffer* buffer;
305
306 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
307 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
308
309 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
310
311 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
312
313 struct chunk* data = &buffer[ nbr ].data;
314
315 if (data->alloc_addr != NULL)
316 fore200e_chunk_free(fore200e, data);
317 }
318 }
319 }
320 }
321}
322
323
324static void
325fore200e_uninit_bs_queue(struct fore200e* fore200e)
326{
327 int scheme, magn;
328
329 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
330 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
331
332 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
333 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
334
335 if (status->alloc_addr)
336 fore200e_dma_chunk_free(fore200e, status);
337
338 if (rbd_block->alloc_addr)
339 fore200e_dma_chunk_free(fore200e, rbd_block);
340 }
341 }
342}
343
344
/* cold-reset the adapter.  When 'diag' is set, also wait for the on-board
   self-test to report and advance the state machine to RESET.
   Returns 0 on success, -ENODEV if the self-test fails. */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    /* announce a cold start to the firmware monitor before resetting */
    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}
371
372
/* tear down a device, releasing exactly the resources the current
   initialization state says were acquired.  The switch deliberately
   cascades from the most-advanced state down to FORE200E_STATE_BLANK;
   every case falls through to the next. */
static void
fore200e_shutdown(struct fore200e* fore200e)
{
    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
	   fore200e->name, fore200e->phys_base,
	   fore200e_irq_itoa(fore200e->irq));

    if (fore200e->state > FORE200E_STATE_RESET) {
	/* first, reset the board to prevent further interrupts or data transfers */
	fore200e_reset(fore200e, 0);
    }

    /* then, release all allocated resources */
    switch(fore200e->state) {

    case FORE200E_STATE_COMPLETE:
	kfree(fore200e->stats);

	/* fall through */
    case FORE200E_STATE_IRQ:
	free_irq(fore200e->irq, fore200e->atm_dev);

	/* fall through */
    case FORE200E_STATE_ALLOC_BUF:
	fore200e_free_rx_buf(fore200e);

	/* fall through */
    case FORE200E_STATE_INIT_BSQ:
	fore200e_uninit_bs_queue(fore200e);

	/* fall through */
    case FORE200E_STATE_INIT_RXQ:
	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);

	/* fall through */
    case FORE200E_STATE_INIT_TXQ:
	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);

	/* fall through */
    case FORE200E_STATE_INIT_CMDQ:
	fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);

	/* fall through */
    case FORE200E_STATE_INITIALIZE:
	/* nothing to do for that state */

	/* fall through */
    case FORE200E_STATE_START_FW:
	/* nothing to do for that state */

	/* fall through */
    case FORE200E_STATE_RESET:
	/* nothing to do for that state */

	/* fall through */
    case FORE200E_STATE_MAP:
	fore200e->bus->unmap(fore200e);

	/* fall through */
    case FORE200E_STATE_CONFIGURE:
	/* nothing to do for that state */

	/* fall through */
    case FORE200E_STATE_REGISTER:
	/* XXX shouldn't we *start* by deregistering the device? */
	atm_dev_deregister(fore200e->atm_dev);

	/* fall through */
    case FORE200E_STATE_BLANK:
	/* nothing to do for that state */
	break;
    }
}
443
444
445#ifdef CONFIG_PCI
446
/* read a 32-bit word from PCA-200E slave RAM / registers */
static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    return le32_to_cpu(readl(addr));
}
453
454
/* write a 32-bit word to PCA-200E slave RAM / registers */
static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    writel(cpu_to_le32(val), addr);
}
461
/* return non-zero iff the board has an interrupt posted (PSR register) */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}
476
477
/* acknowledge (clear) the board interrupt via the host control register */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}
483
484
/* pulse the board reset bit: assert, spin ~10 ms, then deassert */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}
492
493
/* map the PCA-200E I/O space into kernel virtual memory and record the
   addresses of the board-specific registers.
   Returns 0 on success, -EFAULT if the mapping fails. */
static int fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "can't map device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers */
    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}
515
516
/* undo fore200e_pca_map(): release the I/O space mapping, if any */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

    if (fore200e->virt_base != NULL)
	iounmap(fore200e->virt_base);
}
525
526
/* program the PCA-200E's PCI config space: bus-master control flags and a
   raised latency timer.  Returns 0 on success, -EIO if the interrupt line
   looks misconfigured. */
static int fore200e_pca_configure(struct fore200e *fore200e)
{
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
    u8              master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    /* IRQ 0 and 0xFF both denote "no usable interrupt line" here */
    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianness of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
	| PCA200E_CTRL_DIS_CACHE_RD
	| PCA200E_CTRL_DIS_WRT_INVAL
	| PCA200E_CTRL_ENA_CONT_REQ_MODE
	| PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performance of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
565
566
/* fetch the adapter's PROM data (MAC address, serial number, ...) by
   issuing a GET_PROM command on the command queue.  'prom' is DMA-mapped
   so the adapter can fill it in directly.  Returns 0 on success, -ENOMEM
   if the mapping fails, -EIO if the command does not complete in time. */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
			      DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, prom_dma))
	return -ENOMEM;

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* the opcode is written last, after the entry is marked pending */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}
614
615
/* append PCI bus/slot/function information to the /proc output buffer;
   returns the number of characters written to 'page' */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);

    return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
624
/* bus-operations vtable for the PCI-attached PCA-200E variant.
   No .irq_enable hook: interrupts need no explicit enabling on PCA. */
static const struct fore200e_bus fore200e_pci_ops = {
    .model_name      = "PCA-200E",
    .proc_name       = "pca200e",
    .descr_alignment = 32,
    .buffer_alignment = 4,
    .status_alignment = 32,
    .read            = fore200e_pca_read,
    .write           = fore200e_pca_write,
    .configure       = fore200e_pca_configure,
    .map             = fore200e_pca_map,
    .reset           = fore200e_pca_reset,
    .prom_read       = fore200e_pca_prom_read,
    .unmap           = fore200e_pca_unmap,
    .irq_check       = fore200e_pca_irq_check,
    .irq_ack         = fore200e_pca_irq_ack,
    .proc_read       = fore200e_pca_proc_read,
};
642#endif /* CONFIG_PCI */
643
644#ifdef CONFIG_SBUS
645
/* read a 32-bit word from SBA-200E registers / RAM (SBus accessor) */
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}
650
/* write a 32-bit word to SBA-200E registers / RAM (SBus accessor) */
static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}
655
/* enable board interrupts, preserving the sticky bits of the HCR */
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}
661
/* return non-zero iff the board is requesting an interrupt */
static int fore200e_sba_irq_check(struct fore200e *fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}
666
/* acknowledge the interrupt request, preserving the sticky HCR bits */
static void fore200e_sba_irq_ack(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}
672
/* pulse the board reset bit: assert, spin ~10 ms, then deassert */
static void fore200e_sba_reset(struct fore200e *fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}
679
680static int __init fore200e_sba_map(struct fore200e *fore200e)
681{
682 struct platform_device *op = to_platform_device(fore200e->dev);
683 unsigned int bursts;
684
685 /* gain access to the SBA specific registers */
686 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
687 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
688 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
689 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
690
691 if (!fore200e->virt_base) {
692 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
693 return -EFAULT;
694 }
695
696 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
697
698 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
699
700 /* get the supported DVMA burst sizes */
701 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
702
703 if (sbus_can_dma_64bit())
704 sbus_set_sbus64(&op->dev, bursts);
705
706 fore200e->state = FORE200E_STATE_MAP;
707 return 0;
708}
709
/* release all register and RAM mappings created by fore200e_sba_map() */
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
    struct platform_device *op = to_platform_device(fore200e->dev);

    of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
}
719
/* nothing to configure on SBus hardware; just advance the state machine */
static int __init fore200e_sba_configure(struct fore200e *fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
725
/* read MAC address, serial number and hardware revision from the OpenBoot
   properties of the device node.
   NOTE(review): "madaddrlo2" / "madaddrhi4" appear to be the literal (sic)
   OBP property names on these boards -- do not "fix" the spelling without
   checking real firmware.  Also note the two memcpy()s overlap at
   mac_addr[4..5]; the second copy wins there -- confirm the intended
   layout against the prom_data definition. */
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const u8 *prop;
    int len;

    prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
    if (!prop)
	return -ENODEV;
    memcpy(&prom->mac_addr[4], prop, 4);

    prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
    if (!prop)
	return -ENODEV;
    memcpy(&prom->mac_addr[2], prop, 4);

    prom->serial_number = of_getintprop_default(op->dev.of_node,
						"serialnumber", 0);
    prom->hw_revision = of_getintprop_default(op->dev.of_node,
					      "promversion", 0);

    return 0;
}
749
/* append SBus slot/device information to the /proc output buffer;
   returns the number of characters written to 'page' */
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const struct linux_prom_registers *regs;

    regs = of_get_property(op->dev.of_node, "reg", NULL);

    return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
		   (regs ? regs->which_io : 0), op->dev.of_node);
}
760
/* bus-operations vtable for the SBus-attached SBA-200E variant
   (unlike PCA, this bus needs an explicit .irq_enable) */
static const struct fore200e_bus fore200e_sbus_ops = {
    .model_name      = "SBA-200E",
    .proc_name       = "sba200e",
    .descr_alignment = 32,
    .buffer_alignment = 64,
    .status_alignment = 32,
    .read            = fore200e_sba_read,
    .write           = fore200e_sba_write,
    .configure       = fore200e_sba_configure,
    .map             = fore200e_sba_map,
    .reset           = fore200e_sba_reset,
    .prom_read       = fore200e_sba_prom_read,
    .unmap           = fore200e_sba_unmap,
    .irq_enable      = fore200e_sba_irq_enable,
    .irq_check       = fore200e_sba_irq_check,
    .irq_ack         = fore200e_sba_irq_ack,
    .proc_read       = fore200e_sba_proc_read,
};
779#endif /* CONFIG_SBUS */
780
/* reap completed entries from the transmit queue: release per-entry
   resources, hand finished skbs back to their vcc (or drop them if the
   vcc has since been closed or re-opened), and update tx statistics.
   NOTE(review): appears to require fore200e->q_lock held, as done in
   fore200e_irq() -- confirm for the tasklet path. */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	/* stop at the first entry the adapter has not completed yet */
	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
			 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}

		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	/* give the entry back to the driver/adapter and advance the tail */
	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}
871
872
#ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency check of a buffer supply queue's free list:
   verifies each free buffer's 'supplied' flag, scheme/magnitude fields and
   index range, and that the walked length matches freebuf_count.
   'where' tags the call site in the log output.  Always returns 0. */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

	if (buffer->supplied) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buffer->index);
	}

	if (buffer->magn != magn) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buffer->index, buffer->magn);
	}

	if (buffer->scheme != scheme) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buffer->index, buffer->scheme);
	}

	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buffer->index);
	}

	count++;
	buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
#endif
913
914
/* replenish the adapter's rx buffer supply (BS) queues: whenever a queue
   holds at least RBD_BLK_SIZE free host buffers, group them into a
   receive buffer descriptor block and post the block to the adapter. */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    /* fill one descriptor: DMA address of the buffer plus a
		       handle used to find the host buffer again on rx completion */
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		/* posting the RBD block's DMA address hands the buffers to the adapter */
		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}
971
972
/* build an skb from the segments of a received PDU descriptor and push it
   up to the vcc.  Returns 0 on success, -ENOMEM if the skb cannot be
   allocated or the vcc's receive quota is exceeded (rx_drop is counted
   in both failure cases). */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* atmdump-like apps expect raw AAL0 SDUs prefixed with the 4-byte
       (HEC-less) cell header, rebuilt here from the rpd fields */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	               rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU.  */
	dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
				rpd->rsd[i].length, DMA_FROM_DEVICE);

	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);

	/* Now let the device get at it again.  */
	dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
				   rpd->rsd[i].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* update per-vcc rx PDU size statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    return 0;
}
1063
1064
1065static void
1066fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1067{
1068 struct host_bsq* bsq;
1069 struct buffer* buffer;
1070 int i;
1071
1072 for (i = 0; i < rpd->nseg; i++) {
1073
1074 /* rebuild rx buffer address from rsd handle */
1075 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1076
1077 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1078
1079#ifdef FORE200E_BSQ_DEBUG
1080 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1081
1082 if (buffer->supplied == 0)
1083 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1084 buffer->scheme, buffer->magn, buffer->index);
1085 buffer->supplied = 0;
1086#endif
1087
1088 /* re-insert the buffer into the free buffer list */
1089 buffer->next = bsq->freebuf;
1090 bsq->freebuf = buffer;
1091
1092 /* then increment the number of free rx buffers */
1093 bsq->freebuf_count++;
1094 }
1095}
1096
1097
/* service the receive queue: deliver each completed receive PDU to its
   vcc (if still open and ready), recycle the rx buffers, acknowledge the
   entry back to the adapter, and resupply the buffer queues. */
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    struct host_rxq*        rxq = &fore200e->host_rxq;
    struct host_rxq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

	entry = &rxq->host_entry[ rxq->head ];

	/* no more received PDUs */
	if ((*entry->status & STATUS_COMPLETE) == 0)
	    break;

	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
		    fore200e->atm_dev->number,
		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
	}
	else {
	    vcc = vc_map->vcc;
	    ASSERT(vcc);

	    if ((*entry->status & STATUS_ERROR) == 0) {

		fore200e_push_rpd(fore200e, vcc, entry->rpd);
	    }
	    else {
		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
			fore200e->atm_dev->number,
			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
		atomic_inc(&vcc->stats->rx_err);
	    }
	}

	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

	/* give the rx buffers of this PDU back to their supply queues */
	fore200e_collect_rpd(fore200e, entry->rpd);

	/* rewrite the rpd address to ack the received PDU */
	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
	*entry->status = STATUS_FREE;

	fore200e_supply(fore200e);
    }
}
1150
1151
#ifndef FORE200E_USE_TASKLET
/* Non-tasklet interrupt bottom half: service the rx queue, then the tx
 * queue. Each queue is drained under q_lock, but the lock is dropped in
 * between so irq-disabled time is bounded per queue. */
static void
fore200e_irq(struct fore200e* fore200e)
{
    unsigned long flags;

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
#endif
1167
1168
1169static irqreturn_t
1170fore200e_interrupt(int irq, void* dev)
1171{
1172 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1173
1174 if (fore200e->bus->irq_check(fore200e) == 0) {
1175
1176 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1177 return IRQ_NONE;
1178 }
1179 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1180
1181#ifdef FORE200E_USE_TASKLET
1182 tasklet_schedule(&fore200e->tx_tasklet);
1183 tasklet_schedule(&fore200e->rx_tasklet);
1184#else
1185 fore200e_irq(fore200e);
1186#endif
1187
1188 fore200e->bus->irq_ack(fore200e);
1189 return IRQ_HANDLED;
1190}
1191
1192
1193#ifdef FORE200E_USE_TASKLET
1194static void
1195fore200e_tx_tasklet(unsigned long data)
1196{
1197 struct fore200e* fore200e = (struct fore200e*) data;
1198 unsigned long flags;
1199
1200 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1201
1202 spin_lock_irqsave(&fore200e->q_lock, flags);
1203 fore200e_tx_irq(fore200e);
1204 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1205}
1206
1207
1208static void
1209fore200e_rx_tasklet(unsigned long data)
1210{
1211 struct fore200e* fore200e = (struct fore200e*) data;
1212 unsigned long flags;
1213
1214 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1215
1216 spin_lock_irqsave(&fore200e->q_lock, flags);
1217 fore200e_rx_irq((struct fore200e*) data);
1218 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1219}
1220#endif
1221
1222
1223static int
1224fore200e_select_scheme(struct atm_vcc* vcc)
1225{
1226 /* fairly balance the VCs over (identical) buffer schemes */
1227 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1228
1229 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1230 vcc->itf, vcc->vpi, vcc->vci, scheme);
1231
1232 return scheme;
1233}
1234
1235
/* Issue an ACTIVATE_VCIN or DEACTIVATE_VCIN command to the cp for the
 * given VC, via the shared command queue, and busy-wait for completion.
 *
 * @activate: nonzero to open the VC, zero to close it.
 * @mtu:      max SDU size, only meaningful to the cp for AAL0.
 * Returns 0 on success, -EIO if the cp does not complete the command.
 *
 * NOTE(review): the command queue head is advanced without any visible
 * locking here — presumably callers serialize command submission; confirm
 * against the rest of the driver. */
static int
fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct activate_opcode activ_opcode;
    struct deactivate_opcode deactiv_opcode;
    struct vpvc vpvc;
    int ok;
    enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);

    /* claim the current command slot and advance the queue head */
    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    if (activate) {
	/* pick a buffer scheme for the new VC and remember it */
	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);

	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
	activ_opcode.aal = aal;
	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
	activ_opcode.pad = 0;
    }
    else {
	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
	deactiv_opcode.pad = 0;
    }

    vpvc.vci = vcc->vci;
    vpvc.vpi = vcc->vpi;

    *entry->status = STATUS_PENDING;

    if (activate) {

#ifdef FORE200E_52BYTE_AAL0_SDU
	mtu = 48;
#endif
	/* the MTU is not used by the cp, except in the case of AAL0 */
	fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
	/* the opcode word is written last: it is what makes the command
	   visible to the cp */
	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
    }
    else {
	fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
    }

    /* poll the status word until the cp marks the command complete */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to %s VC %d.%d.%d\n",
	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
	return -EIO;
    }

    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
	    activate ? "open" : "clos");

    return 0;
}
1297
1298
1299#define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1300
1301static void
1302fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1303{
1304 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1305
1306 /* compute the data cells to idle cells ratio from the tx PCR */
1307 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1308 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1309 }
1310 else {
1311 /* disable rate control */
1312 rate->data_cells = rate->idle_cells = 0;
1313 }
1314}
1315
1316
1317static int
1318fore200e_open(struct atm_vcc *vcc)
1319{
1320 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1321 struct fore200e_vcc* fore200e_vcc;
1322 struct fore200e_vc_map* vc_map;
1323 unsigned long flags;
1324 int vci = vcc->vci;
1325 short vpi = vcc->vpi;
1326
1327 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1328 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1329
1330 spin_lock_irqsave(&fore200e->q_lock, flags);
1331
1332 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1333 if (vc_map->vcc) {
1334
1335 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1336
1337 printk(FORE200E "VC %d.%d.%d already in use\n",
1338 fore200e->atm_dev->number, vpi, vci);
1339
1340 return -EINVAL;
1341 }
1342
1343 vc_map->vcc = vcc;
1344
1345 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1346
1347 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1348 if (fore200e_vcc == NULL) {
1349 vc_map->vcc = NULL;
1350 return -ENOMEM;
1351 }
1352
1353 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1354 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1355 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1356 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1357 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1358 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1359 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1360
1361 /* pseudo-CBR bandwidth requested? */
1362 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1363
1364 mutex_lock(&fore200e->rate_mtx);
1365 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1366 mutex_unlock(&fore200e->rate_mtx);
1367
1368 kfree(fore200e_vcc);
1369 vc_map->vcc = NULL;
1370 return -EAGAIN;
1371 }
1372
1373 /* reserve bandwidth */
1374 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1375 mutex_unlock(&fore200e->rate_mtx);
1376 }
1377
1378 vcc->itf = vcc->dev->number;
1379
1380 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1381 set_bit(ATM_VF_ADDR, &vcc->flags);
1382
1383 vcc->dev_data = fore200e_vcc;
1384
1385 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1386
1387 vc_map->vcc = NULL;
1388
1389 clear_bit(ATM_VF_ADDR, &vcc->flags);
1390 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1391
1392 vcc->dev_data = NULL;
1393
1394 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1395
1396 kfree(fore200e_vcc);
1397 return -EINVAL;
1398 }
1399
1400 /* compute rate control parameters */
1401 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1402
1403 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1404 set_bit(ATM_VF_HASQOS, &vcc->flags);
1405
1406 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1407 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1408 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1409 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1410 }
1411
1412 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1413 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1414 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1415
1416 /* new incarnation of the vcc */
1417 vc_map->incarn = ++fore200e->incarn_count;
1418
1419 /* VC unusable before this flag is set */
1420 set_bit(ATM_VF_READY, &vcc->flags);
1421
1422 return 0;
1423}
1424
1425
/* Close a VC: mark it unusable, deactivate it on the cp, release the
 * (vpi,vci) slot and any reserved CBR bandwidth, and free the per-VC
 * state allocated in fore200e_open(). */
static void
fore200e_close(struct atm_vcc* vcc)
{
    struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
    struct fore200e_vcc* fore200e_vcc;
    struct fore200e_vc_map* vc_map;
    unsigned long flags;

    ASSERT(vcc);
    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));

    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));

    /* stop fore200e_send() and the rx path from using this VC first */
    clear_bit(ATM_VF_READY, &vcc->flags);

    /* tell the cp to stop delivering/accepting PDUs for this VC */
    fore200e_activate_vcin(fore200e, 0, vcc, 0);

    spin_lock_irqsave(&fore200e->q_lock, flags);

    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);

    /* the vc is no longer considered as "in use" by fore200e_open() */
    vc_map->vcc = NULL;

    vcc->itf = vcc->vci = vcc->vpi = 0;

    /* detach the per-VC state under the lock; it is freed below, after
       the lock is dropped */
    fore200e_vcc = FORE200E_VCC(vcc);
    vcc->dev_data = NULL;

    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    /* release reserved bandwidth, if any */
    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {

	mutex_lock(&fore200e->rate_mtx);
	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
	mutex_unlock(&fore200e->rate_mtx);

	clear_bit(ATM_VF_HASQOS, &vcc->flags);
    }

    clear_bit(ATM_VF_ADDR, &vcc->flags);
    clear_bit(ATM_VF_PARTIAL,&vcc->flags);

    ASSERT(fore200e_vcc);
    kfree(fore200e_vcc);
}
1474
1475
1476static int
1477fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1478{
1479 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1480 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1481 struct fore200e_vc_map* vc_map;
1482 struct host_txq* txq = &fore200e->host_txq;
1483 struct host_txq_entry* entry;
1484 struct tpd* tpd;
1485 struct tpd_haddr tpd_haddr;
1486 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1487 int tx_copy = 0;
1488 int tx_len = skb->len;
1489 u32* cell_header = NULL;
1490 unsigned char* skb_data;
1491 int skb_len;
1492 unsigned char* data;
1493 unsigned long flags;
1494
1495 ASSERT(vcc);
1496 ASSERT(fore200e);
1497 ASSERT(fore200e_vcc);
1498
1499 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1500 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1501 dev_kfree_skb_any(skb);
1502 return -EINVAL;
1503 }
1504
1505#ifdef FORE200E_52BYTE_AAL0_SDU
1506 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1507 cell_header = (u32*) skb->data;
1508 skb_data = skb->data + 4; /* skip 4-byte cell header */
1509 skb_len = tx_len = skb->len - 4;
1510
1511 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1512 }
1513 else
1514#endif
1515 {
1516 skb_data = skb->data;
1517 skb_len = skb->len;
1518 }
1519
1520 if (((unsigned long)skb_data) & 0x3) {
1521
1522 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1523 tx_copy = 1;
1524 tx_len = skb_len;
1525 }
1526
1527 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1528
1529 /* this simply NUKES the PCA board */
1530 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1531 tx_copy = 1;
1532 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1533 }
1534
1535 if (tx_copy) {
1536 data = kmalloc(tx_len, GFP_ATOMIC);
1537 if (data == NULL) {
1538 if (vcc->pop) {
1539 vcc->pop(vcc, skb);
1540 }
1541 else {
1542 dev_kfree_skb_any(skb);
1543 }
1544 return -ENOMEM;
1545 }
1546
1547 memcpy(data, skb_data, skb_len);
1548 if (skb_len < tx_len)
1549 memset(data + skb_len, 0x00, tx_len - skb_len);
1550 }
1551 else {
1552 data = skb_data;
1553 }
1554
1555 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1556 ASSERT(vc_map->vcc == vcc);
1557
1558 retry_here:
1559
1560 spin_lock_irqsave(&fore200e->q_lock, flags);
1561
1562 entry = &txq->host_entry[ txq->head ];
1563
1564 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1565
1566 /* try to free completed tx queue entries */
1567 fore200e_tx_irq(fore200e);
1568
1569 if (*entry->status != STATUS_FREE) {
1570
1571 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1572
1573 /* retry once again? */
1574 if (--retry > 0) {
1575 udelay(50);
1576 goto retry_here;
1577 }
1578
1579 atomic_inc(&vcc->stats->tx_err);
1580
1581 fore200e->tx_sat++;
1582 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1583 fore200e->name, fore200e->cp_queues->heartbeat);
1584 if (vcc->pop) {
1585 vcc->pop(vcc, skb);
1586 }
1587 else {
1588 dev_kfree_skb_any(skb);
1589 }
1590
1591 if (tx_copy)
1592 kfree(data);
1593
1594 return -ENOBUFS;
1595 }
1596 }
1597
1598 entry->incarn = vc_map->incarn;
1599 entry->vc_map = vc_map;
1600 entry->skb = skb;
1601 entry->data = tx_copy ? data : NULL;
1602
1603 tpd = entry->tpd;
1604 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1605 DMA_TO_DEVICE);
1606 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1607 if (tx_copy)
1608 kfree(data);
1609 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1610 return -ENOMEM;
1611 }
1612 tpd->tsd[ 0 ].length = tx_len;
1613
1614 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1615 txq->txing++;
1616
1617 /* The dma_map call above implies a dma_sync so the device can use it,
1618 * thus no explicit dma_sync call is necessary here.
1619 */
1620
1621 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1622 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1623 tpd->tsd[0].length, skb_len);
1624
1625 if (skb_len < fore200e_vcc->tx_min_pdu)
1626 fore200e_vcc->tx_min_pdu = skb_len;
1627 if (skb_len > fore200e_vcc->tx_max_pdu)
1628 fore200e_vcc->tx_max_pdu = skb_len;
1629 fore200e_vcc->tx_pdu++;
1630
1631 /* set tx rate control information */
1632 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1633 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1634
1635 if (cell_header) {
1636 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1637 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1638 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1639 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1640 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1641 }
1642 else {
1643 /* set the ATM header, common to all cells conveying the PDU */
1644 tpd->atm_header.clp = 0;
1645 tpd->atm_header.plt = 0;
1646 tpd->atm_header.vci = vcc->vci;
1647 tpd->atm_header.vpi = vcc->vpi;
1648 tpd->atm_header.gfc = 0;
1649 }
1650
1651 tpd->spec.length = tx_len;
1652 tpd->spec.nseg = 1;
1653 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1654 tpd->spec.intr = 1;
1655
1656 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1657 tpd_haddr.pad = 0;
1658 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1659
1660 *entry->status = STATUS_PENDING;
1661 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1662
1663 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1664
1665 return 0;
1666}
1667
1668
/* Fetch the on-board statistics block from the cp into fore200e->stats
 * (allocated lazily on first use), via a GET_STATS command on the shared
 * command queue. Returns 0 on success, -ENOMEM on allocation/mapping
 * failure, -EIO if the cp does not complete the command. */
static int
fore200e_getstats(struct fore200e* fore200e)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct stats_opcode opcode;
    int ok;
    u32 stats_dma_addr;

    /* lazily allocate the stats buffer; it is kept for subsequent calls */
    if (fore200e->stats == NULL) {
	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
	if (fore200e->stats == NULL)
	    return -ENOMEM;
    }

    /* map the buffer so the cp can DMA the statistics into it */
    stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
				    sizeof(struct stats), DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, stats_dma_addr))
	return -ENOMEM;

    /* claim the current command slot and advance the queue head */
    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad = 0;

    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode makes the command visible to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    /* poll until the cp marks the command complete */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
1713
1714
1715static int
1716fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1717{
1718 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1719
1720 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1721 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1722
1723 return -EINVAL;
1724}
1725
1726
1727static int
1728fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1729{
1730 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1731
1732 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1733 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1734
1735 return -EINVAL;
1736}
1737
1738
#if 0 /* currently unused */
/* Read the OC-3 (SUNI) register file from the cp into *regs via a
 * GET_OC3 command. Compiled out: kept only as reference for the command
 * protocol; note it still uses the legacy bus->dma_map/dma_unmap ops. */
static int
fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;
    u32 oc3_regs_dma_addr;

    /* map the caller's buffer so the cp can DMA the registers into it */
    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_OC3;
    opcode.reg = 0;
    opcode.value = 0;
    opcode.mask = 0;

    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode makes the command visible to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
#endif
1778
1779
/* Update one OC-3 (SUNI) register on the board via a SET_OC3 command:
 * the cp sets (reg & mask) bits to the corresponding bits of value.
 * Returns 0 on success, -EIO if the cp does not complete the command. */
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    /* claim the current command slot and advance the queue head */
    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg = reg;
    opcode.value = value;
    opcode.mask = mask;

    /* no host buffer is involved for a register write */
    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode makes the command visible to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
	return -EIO;
    }

    return 0;
}
1814
1815
1816static int
1817fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1818{
1819 u32 mct_value, mct_mask;
1820 int error;
1821
1822 if (!capable(CAP_NET_ADMIN))
1823 return -EPERM;
1824
1825 switch (loop_mode) {
1826
1827 case ATM_LM_NONE:
1828 mct_value = 0;
1829 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1830 break;
1831
1832 case ATM_LM_LOC_PHY:
1833 mct_value = mct_mask = SUNI_MCT_DLE;
1834 break;
1835
1836 case ATM_LM_RMT_PHY:
1837 mct_value = mct_mask = SUNI_MCT_LLE;
1838 break;
1839
1840 default:
1841 return -EINVAL;
1842 }
1843
1844 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1845 if (error == 0)
1846 fore200e->loop_mode = loop_mode;
1847
1848 return error;
1849}
1850
1851
1852static int
1853fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1854{
1855 struct sonet_stats tmp;
1856
1857 if (fore200e_getstats(fore200e) < 0)
1858 return -EIO;
1859
1860 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1861 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1862 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1863 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1864 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1865 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1866 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1867 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1868 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1869 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1870 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1871 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1872 be32_to_cpu(fore200e->stats->aal5.cells_received);
1873
1874 if (arg)
1875 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1876
1877 return 0;
1878}
1879
1880
1881static int
1882fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1883{
1884 struct fore200e* fore200e = FORE200E_DEV(dev);
1885
1886 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1887
1888 switch (cmd) {
1889
1890 case SONET_GETSTAT:
1891 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1892
1893 case SONET_GETDIAG:
1894 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1895
1896 case ATM_SETLOOP:
1897 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1898
1899 case ATM_GETLOOP:
1900 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1901
1902 case ATM_QUERYLOOP:
1903 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1904 }
1905
1906 return -ENOSYS; /* not implemented */
1907}
1908
1909
1910static int
1911fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1912{
1913 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1914 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1915
1916 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1917 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
1918 return -EINVAL;
1919 }
1920
1921 DPRINTK(2, "change_qos %d.%d.%d, "
1922 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1923 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1924 "available_cell_rate = %u",
1925 vcc->itf, vcc->vpi, vcc->vci,
1926 fore200e_traffic_class[ qos->txtp.traffic_class ],
1927 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1928 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1929 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1930 flags, fore200e->available_cell_rate);
1931
1932 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1933
1934 mutex_lock(&fore200e->rate_mtx);
1935 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1936 mutex_unlock(&fore200e->rate_mtx);
1937 return -EAGAIN;
1938 }
1939
1940 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1941 fore200e->available_cell_rate -= qos->txtp.max_pcr;
1942
1943 mutex_unlock(&fore200e->rate_mtx);
1944
1945 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1946
1947 /* update rate control parameters */
1948 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1949
1950 set_bit(ATM_VF_HASQOS, &vcc->flags);
1951
1952 return 0;
1953 }
1954
1955 return -EINVAL;
1956}
1957
1958
1959static int fore200e_irq_request(struct fore200e *fore200e)
1960{
1961 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1962
1963 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1964 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1965 return -EBUSY;
1966 }
1967
1968 printk(FORE200E "IRQ %s reserved for device %s\n",
1969 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1970
1971#ifdef FORE200E_USE_TASKLET
1972 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1973 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1974#endif
1975
1976 fore200e->state = FORE200E_STATE_IRQ;
1977 return 0;
1978}
1979
1980
1981static int fore200e_get_esi(struct fore200e *fore200e)
1982{
1983 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1984 int ok, i;
1985
1986 if (!prom)
1987 return -ENOMEM;
1988
1989 ok = fore200e->bus->prom_read(fore200e, prom);
1990 if (ok < 0) {
1991 kfree(prom);
1992 return -EBUSY;
1993 }
1994
1995 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1996 fore200e->name,
1997 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
1998 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1999
2000 for (i = 0; i < ESI_LEN; i++) {
2001 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2002 }
2003
2004 kfree(prom);
2005
2006 return 0;
2007}
2008
2009
2010static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
2011{
2012 int scheme, magn, nbr, size, i;
2013
2014 struct host_bsq* bsq;
2015 struct buffer* buffer;
2016
2017 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2018 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2019
2020 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2021
2022 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2023 size = fore200e_rx_buf_size[ scheme ][ magn ];
2024
2025 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2026
2027 /* allocate the array of receive buffers */
2028 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2029 GFP_KERNEL);
2030
2031 if (buffer == NULL)
2032 return -ENOMEM;
2033
2034 bsq->freebuf = NULL;
2035
2036 for (i = 0; i < nbr; i++) {
2037
2038 buffer[ i ].scheme = scheme;
2039 buffer[ i ].magn = magn;
2040#ifdef FORE200E_BSQ_DEBUG
2041 buffer[ i ].index = i;
2042 buffer[ i ].supplied = 0;
2043#endif
2044
2045 /* allocate the receive buffer body */
2046 if (fore200e_chunk_alloc(fore200e,
2047 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2048 DMA_FROM_DEVICE) < 0) {
2049
2050 while (i > 0)
2051 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2052 kfree(buffer);
2053
2054 return -ENOMEM;
2055 }
2056
2057 /* insert the buffer into the free buffer list */
2058 buffer[ i ].next = bsq->freebuf;
2059 bsq->freebuf = &buffer[ i ];
2060 }
2061 /* all the buffers are free, initially */
2062 bsq->freebuf_count = nbr;
2063
2064#ifdef FORE200E_BSQ_DEBUG
2065 bsq_audit(3, bsq, scheme, magn);
2066#endif
2067 }
2068 }
2069
2070 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2071 return 0;
2072}
2073
2074
/* Initialize every (scheme, magnitude) buffer supply queue: allocate the
 * aligned status-word and rbd-block arrays, and wire each host entry to
 * its cp-resident counterpart (including telling the cp where each
 * status word lives). Returns 0 on success, -ENOMEM on failure.
 *
 * NOTE(review): on failure of a later (scheme, magn) pair, chunks
 * allocated for earlier pairs are not freed here — presumably the
 * state-machine-driven teardown handles them; confirm against the
 * driver's cleanup path. */
static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
    int scheme, magn, i;

    struct host_bsq* bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->status,
					 sizeof(enum status),
					 QUEUE_SIZE_BS,
					 fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->rbd_block,
					 sizeof(struct rbd_block),
					 QUEUE_SIZE_BS,
					 fore200e->bus->descr_alignment) < 0) {

		fore200e_dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		                  FORE200E_INDEX(bsq->status.align_addr, enum status, i);
		bsq->host_entry[ i ].rbd_block =
		                  FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		                  FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* each slot starts out available */
		*bsq->host_entry[ i ].status = STATUS_FREE;

		/* tell the cp where the host status word of this slot lives */
		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}
2135
2136
/* Initialize the receive queue: allocate the aligned status-word and rpd
 * arrays, wire each host entry to its cp-resident counterpart, and tell
 * the cp where each status word and rpd lives. Returns 0 on success,
 * -ENOMEM on failure (with partial allocations released). */
static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
    struct host_rxq* rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_RX,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->rpd,
				 sizeof(struct rpd),
				 QUEUE_SIZE_RX,
				 fore200e->bus->descr_alignment) < 0) {

	fore200e_dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	/* each slot starts out available */
	*rxq->host_entry[ i ].status = STATUS_FREE;

	/* tell the cp where the host status word of this slot lives */
	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	/* and where the corresponding rpd is to be DMA'ed */
	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}
2194
2195
2196static int fore200e_init_tx_queue(struct fore200e *fore200e)
2197{
2198 struct host_txq* txq = &fore200e->host_txq;
2199 struct cp_txq_entry __iomem * cp_entry;
2200 int i;
2201
2202 DPRINTK(2, "transmit queue is being initialized\n");
2203
2204 /* allocate and align the array of status words */
2205 if (fore200e_dma_chunk_alloc(fore200e,
2206 &txq->status,
2207 sizeof(enum status),
2208 QUEUE_SIZE_TX,
2209 fore200e->bus->status_alignment) < 0) {
2210 return -ENOMEM;
2211 }
2212
2213 /* allocate and align the array of transmit PDU descriptors */
2214 if (fore200e_dma_chunk_alloc(fore200e,
2215 &txq->tpd,
2216 sizeof(struct tpd),
2217 QUEUE_SIZE_TX,
2218 fore200e->bus->descr_alignment) < 0) {
2219
2220 fore200e_dma_chunk_free(fore200e, &txq->status);
2221 return -ENOMEM;
2222 }
2223
2224 /* get the base address of the cp resident tx queue entries */
2225 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2226
2227 /* fill the host resident and cp resident tx entries */
2228 for (i=0; i < QUEUE_SIZE_TX; i++) {
2229
2230 txq->host_entry[ i ].status =
2231 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2232 txq->host_entry[ i ].tpd =
2233 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2234 txq->host_entry[ i ].tpd_dma =
2235 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2236 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2237
2238 *txq->host_entry[ i ].status = STATUS_FREE;
2239
2240 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2241 &cp_entry[ i ].status_haddr);
2242
2243 /* although there is a one-to-one mapping of tx queue entries and tpds,
2244 we do not write here the DMA (physical) base address of each tpd into
2245 the related cp resident entry, because the cp relies on this write
2246 operation to detect that a new pdu has been submitted for tx */
2247 }
2248
2249 /* set the head and tail entries of the queue */
2250 txq->head = 0;
2251 txq->tail = 0;
2252
2253 fore200e->state = FORE200E_STATE_INIT_TXQ;
2254 return 0;
2255}
2256
2257
2258static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2259{
2260 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2261 struct cp_cmdq_entry __iomem * cp_entry;
2262 int i;
2263
2264 DPRINTK(2, "command queue is being initialized\n");
2265
2266 /* allocate and align the array of status words */
2267 if (fore200e_dma_chunk_alloc(fore200e,
2268 &cmdq->status,
2269 sizeof(enum status),
2270 QUEUE_SIZE_CMD,
2271 fore200e->bus->status_alignment) < 0) {
2272 return -ENOMEM;
2273 }
2274
2275 /* get the base address of the cp resident cmd queue entries */
2276 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2277
2278 /* fill the host resident and cp resident cmd entries */
2279 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2280
2281 cmdq->host_entry[ i ].status =
2282 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2283 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2284
2285 *cmdq->host_entry[ i ].status = STATUS_FREE;
2286
2287 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2288 &cp_entry[ i ].status_haddr);
2289 }
2290
2291 /* set the head entry of the queue */
2292 cmdq->head = 0;
2293
2294 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2295 return 0;
2296}
2297
2298
2299static void fore200e_param_bs_queue(struct fore200e *fore200e,
2300 enum buffer_scheme scheme,
2301 enum buffer_magn magn, int queue_length,
2302 int pool_size, int supply_blksize)
2303{
2304 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2305
2306 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2307 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2308 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2309 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2310}
2311
2312
/*
 * Issue the firmware INITIALIZE command: program queue sizes and buffer
 * supply parameters into the cp resident init block, then poll for
 * completion.  Returns 0 on success, -ENODEV if the board does not reach
 * STATUS_COMPLETE within the timeout.  The order of the MMIO writes below
 * follows the init-block layout and is left untouched.
 */
static int fore200e_initialize(struct fore200e *fore200e)
{
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    /* bus-specific interrupt enabling, if the bus layer provides one */
    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    /* queue geometry the firmware must use */
    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

    /* describe every buffer supply queue (scheme x magnitude) */
    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command; status must be written before the
       opcode, since the opcode write triggers the firmware */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}
2362
2363
2364static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2365{
2366 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2367
2368#if 0
2369 printk("%c", c);
2370#endif
2371 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2372}
2373
2374
2375static int fore200e_monitor_getc(struct fore200e *fore200e)
2376{
2377 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2378 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2379 int c;
2380
2381 while (time_before(jiffies, timeout)) {
2382
2383 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2384
2385 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2386
2387 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2388#if 0
2389 printk("%c", c & 0xFF);
2390#endif
2391 return c & 0xFF;
2392 }
2393 }
2394
2395 return -1;
2396}
2397
2398
static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
{
    char ch;

    /* send the string one character at a time */
    while ((ch = *str++) != '\0') {

	/* the i960 monitor doesn't accept any new character while it
	   still has something to say, so drain its output first */
	while (fore200e_monitor_getc(fore200e) >= 0)
	    ;

	fore200e_monitor_putc(fore200e, ch);
    }

    /* discard whatever the monitor echoed back */
    while (fore200e_monitor_getc(fore200e) >= 0)
	;
}
2411
/* firmware image suffix: little-endian builds (per <asm/byteorder.h>'s
 * __LITTLE_ENDIAN) load the plain ".bin" image, big-endian builds load
 * the "_ecd.bin2" variant */
#ifdef __LITTLE_ENDIAN
#define FW_EXT ".bin"
#else
#define FW_EXT "_ecd.bin2"
#endif
2417
2418static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2419{
2420 const struct firmware *firmware;
2421 const struct fw_header *fw_header;
2422 const __le32 *fw_data;
2423 u32 fw_size;
2424 u32 __iomem *load_addr;
2425 char buf[48];
2426 int err;
2427
2428 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2429 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2430 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2431 return err;
2432 }
2433
2434 fw_data = (const __le32 *)firmware->data;
2435 fw_size = firmware->size / sizeof(u32);
2436 fw_header = (const struct fw_header *)firmware->data;
2437 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2438
2439 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2440 fore200e->name, load_addr, fw_size);
2441
2442 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2443 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2444 goto release;
2445 }
2446
2447 for (; fw_size--; fw_data++, load_addr++)
2448 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2449
2450 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2451
2452#if defined(__sparc_v9__)
2453 /* reported to be required by SBA cards on some sparc64 hosts */
2454 fore200e_spin(100);
2455#endif
2456
2457 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2458 fore200e_monitor_puts(fore200e, buf);
2459
2460 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2461 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2462 goto release;
2463 }
2464
2465 printk(FORE200E "device %s firmware started\n", fore200e->name);
2466
2467 fore200e->state = FORE200E_STATE_START_FW;
2468 err = 0;
2469
2470release:
2471 release_firmware(firmware);
2472 return err;
2473}
2474
2475
2476static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2477{
2478 struct atm_dev* atm_dev;
2479
2480 DPRINTK(2, "device %s being registered\n", fore200e->name);
2481
2482 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2483 -1, NULL);
2484 if (atm_dev == NULL) {
2485 printk(FORE200E "unable to register device %s\n", fore200e->name);
2486 return -ENODEV;
2487 }
2488
2489 atm_dev->dev_data = fore200e;
2490 fore200e->atm_dev = atm_dev;
2491
2492 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2493 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2494
2495 fore200e->available_cell_rate = ATM_OC3_PCR;
2496
2497 fore200e->state = FORE200E_STATE_REGISTER;
2498 return 0;
2499}
2500
2501
/*
 * Bring one adapter all the way up: register with the ATM layer, map and
 * reset the board, load and start the firmware, then set up every
 * host/cp queue and the receive buffers, and finally request the IRQ.
 * Each successful step advances fore200e->state, which the shutdown path
 * uses to unwind a partially completed initialization, so the order of
 * these calls is significant.
 */
static int fore200e_init(struct fore200e *fore200e, struct device *parent)
{
    if (fore200e_register(fore200e, parent) < 0)
	return -ENODEV;

    if (fore200e->bus->configure(fore200e) < 0)
	return -ENODEV;

    if (fore200e->bus->map(fore200e) < 0)
	return -ENODEV;

    if (fore200e_reset(fore200e, 1) < 0)
	return -ENODEV;

    if (fore200e_load_and_start_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_initialize(fore200e) < 0)
	return -ENODEV;

    if (fore200e_init_cmd_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_tx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_rx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_bs_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_alloc_rx_buf(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_get_esi(fore200e) < 0)
	return -EIO;

    if (fore200e_irq_request(fore200e) < 0)
	return -EBUSY;

    /* hand the initial batch of receive buffers to the firmware */
    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
    return 0;
}
2549
2550#ifdef CONFIG_SBUS
2551static const struct of_device_id fore200e_sba_match[];
2552static int fore200e_sba_probe(struct platform_device *op)
2553{
2554 const struct of_device_id *match;
2555 struct fore200e *fore200e;
2556 static int index = 0;
2557 int err;
2558
2559 match = of_match_device(fore200e_sba_match, &op->dev);
2560 if (!match)
2561 return -EINVAL;
2562
2563 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2564 if (!fore200e)
2565 return -ENOMEM;
2566
2567 fore200e->bus = &fore200e_sbus_ops;
2568 fore200e->dev = &op->dev;
2569 fore200e->irq = op->archdata.irqs[0];
2570 fore200e->phys_base = op->resource[0].start;
2571
2572 sprintf(fore200e->name, "SBA-200E-%d", index);
2573
2574 err = fore200e_init(fore200e, &op->dev);
2575 if (err < 0) {
2576 fore200e_shutdown(fore200e);
2577 kfree(fore200e);
2578 return err;
2579 }
2580
2581 index++;
2582 dev_set_drvdata(&op->dev, fore200e);
2583
2584 return 0;
2585}
2586
static int fore200e_sba_remove(struct platform_device *op)
{
    struct fore200e *fore200e = platform_get_drvdata(op);

    /* tear the adapter down and release the per-device state */
    fore200e_shutdown(fore200e);
    kfree(fore200e);

    return 0;
}
2596
/* OF match table: SBA-200E adapters are identified by their PROM name */
static const struct of_device_id fore200e_sba_match[] = {
	{
		.name = SBA200E_PROM_NAME,
	},
	{},
};
MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2604
/* platform (SBUS) driver glue; probe/remove handle one SBA-200E each */
static struct platform_driver fore200e_sba_driver = {
	.driver = {
		.name = "fore_200e",
		.of_match_table = fore200e_sba_match,
	},
	.probe		= fore200e_sba_probe,
	.remove		= fore200e_sba_remove,
};
2613#endif
2614
2615#ifdef CONFIG_PCI
2616static int fore200e_pca_detect(struct pci_dev *pci_dev,
2617 const struct pci_device_id *pci_ent)
2618{
2619 struct fore200e* fore200e;
2620 int err = 0;
2621 static int index = 0;
2622
2623 if (pci_enable_device(pci_dev)) {
2624 err = -EINVAL;
2625 goto out;
2626 }
2627
2628 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2629 err = -EINVAL;
2630 goto out;
2631 }
2632
2633 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2634 if (fore200e == NULL) {
2635 err = -ENOMEM;
2636 goto out_disable;
2637 }
2638
2639 fore200e->bus = &fore200e_pci_ops;
2640 fore200e->dev = &pci_dev->dev;
2641 fore200e->irq = pci_dev->irq;
2642 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2643
2644 sprintf(fore200e->name, "PCA-200E-%d", index - 1);
2645
2646 pci_set_master(pci_dev);
2647
2648 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2649 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2650
2651 sprintf(fore200e->name, "PCA-200E-%d", index);
2652
2653 err = fore200e_init(fore200e, &pci_dev->dev);
2654 if (err < 0) {
2655 fore200e_shutdown(fore200e);
2656 goto out_free;
2657 }
2658
2659 ++index;
2660 pci_set_drvdata(pci_dev, fore200e);
2661
2662out:
2663 return err;
2664
2665out_free:
2666 kfree(fore200e);
2667out_disable:
2668 pci_disable_device(pci_dev);
2669 goto out;
2670}
2671
2672
static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
{
    struct fore200e *fore200e = pci_get_drvdata(pci_dev);

    /* undo everything fore200e_pca_detect() set up, in reverse order */
    fore200e_shutdown(fore200e);
    kfree(fore200e);
    pci_disable_device(pci_dev);
}
2683
2684
/* PCI IDs served by this driver: FORE PCA-200E, any subsystem IDs */
static const struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2691
/* PCI driver glue; probe/remove handle one PCA-200E adapter each */
static struct pci_driver fore200e_pca_driver = {
    .name =     "fore_200e",
    .probe =    fore200e_pca_detect,
    .remove =   fore200e_pca_remove_one,
    .id_table = fore200e_pca_tbl,
};
2698#endif
2699
2700static int __init fore200e_module_init(void)
2701{
2702 int err = 0;
2703
2704 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2705
2706#ifdef CONFIG_SBUS
2707 err = platform_driver_register(&fore200e_sba_driver);
2708 if (err)
2709 return err;
2710#endif
2711
2712#ifdef CONFIG_PCI
2713 err = pci_register_driver(&fore200e_pca_driver);
2714#endif
2715
2716#ifdef CONFIG_SBUS
2717 if (err)
2718 platform_driver_unregister(&fore200e_sba_driver);
2719#endif
2720
2721 return err;
2722}
2723
/* Module unload: unregister the bus drivers (reverse of registration). */
static void __exit fore200e_module_cleanup(void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver(&fore200e_pca_driver);
#endif
#ifdef CONFIG_SBUS
    platform_driver_unregister(&fore200e_sba_driver);
#endif
}
2733
/*
 * /proc read callback registered in fore200e_ops.proc_read.
 *
 * The ATM proc layer calls this once per output chunk with an increasing
 * *pos.  The chain of "if (!left--)" blocks maps each position to one
 * section of the report (device info, buffer counts, cp heartbeat, media
 * info, monitor state, the various statistics groups, then one line per
 * ready VCC).  The order of these blocks therefore defines the report
 * layout and must not be rearranged.  Returns the chunk length, 0 when
 * there is nothing left to print, or -EIO if the board statistics cannot
 * be retrieved.
 */
static int
fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
{
    struct fore200e* fore200e = FORE200E_DEV(dev);
    struct fore200e_vcc* fore200e_vcc;
    struct atm_vcc* vcc;
    int i, len, left = *pos;
    unsigned long flags;

    /* chunk 0: device identity; also refresh the on-board statistics
       used by the later chunks */
    if (!left--) {

	if (fore200e_getstats(fore200e) < 0)
	    return -EIO;

	len = sprintf(page,"\n"
		       " device:\n"
		       " internal name:\t\t%s\n", fore200e->name);

	/* print bus-specific information */
	if (fore200e->bus->proc_read)
	    len += fore200e->bus->proc_read(fore200e, page + len);

	len += sprintf(page + len,
		" interrupt line:\t\t%s\n"
		" physical base address:\t0x%p\n"
		" virtual base address:\t0x%p\n"
		" factory address (ESI):\t%pM\n"
		" board serial number:\t\t%d\n\n",
		fore200e_irq_itoa(fore200e->irq),
		(void*)fore200e->phys_base,
		fore200e->virt_base,
		fore200e->esi,
		fore200e->esi[4] * 256 + fore200e->esi[5]);

	return len;
    }

    /* chunk 1: free buffer counts per supply queue */
    if (!left--)
	return sprintf(page,
		       " free small bufs, scheme 1:\t%d\n"
		       " free large bufs, scheme 1:\t%d\n"
		       " free small bufs, scheme 2:\t%d\n"
		       " free large bufs, scheme 2:\t%d\n",
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

    /* chunk 2: cell processor heartbeat; the 0xDEAD high half-word marks
       a firmware-reported fatal error */
    if (!left--) {
	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

	len = sprintf(page,"\n\n"
		      " cell processor:\n"
		      " heartbeat state:\t\t");

	if (hb >> 16 != 0xDEAD)
	    len += sprintf(page + len, "0x%08x\n", hb);
	else
	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

	return len;
    }

    /* chunk 3: firmware/monitor releases, physical media, loopback mode */
    if (!left--) {
	static const char* media_name[] = {
	    "unshielded twisted pair",
	    "multimode optical fiber ST",
	    "multimode optical fiber SC",
	    "single-mode optical fiber ST",
	    "single-mode optical fiber SC",
	    "unknown"
	};

	static const char* oc3_mode[] = {
	    "normal operation",
	    "diagnostic loopback",
	    "line loopback",
	    "unknown"
	};

	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
	u32 oc3_index;

	/* clamp unrecognized media types to the "unknown" entry */
	if (media_index > 4)
	    media_index = 5;

	switch (fore200e->loop_mode) {
	    case ATM_LM_NONE:    oc3_index = 0;
		                 break;
	    case ATM_LM_LOC_PHY: oc3_index = 1;
		                 break;
	    case ATM_LM_RMT_PHY: oc3_index = 2;
		                 break;
	    default:             oc3_index = 3;
	}

	/* the shift pairs extract byte/half-word fields of the releases */
	return sprintf(page,
		       " firmware release:\t\t%d.%d.%d\n"
		       " monitor release:\t\t%d.%d\n"
		       " media type:\t\t\t%s\n"
		       " OC-3 revision:\t\t0x%x\n"
                       " OC-3 mode:\t\t\t%s",
		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
		       mon960_release >> 16, mon960_release << 16 >> 16,
		       media_name[ media_index ],
		       oc3_revision,
		       oc3_mode[ oc3_index ]);
    }

    /* chunk 4: boot monitor state */
    if (!left--) {
	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

	return sprintf(page,
		       "\n\n"
		       " monitor:\n"
		       " version number:\t\t%d\n"
		       " boot status word:\t\t0x%08x\n",
		       fore200e->bus->read(&cp_monitor->mon_version),
		       fore200e->bus->read(&cp_monitor->bstat));
    }

    /* chunk 5: 4b5b physical layer statistics */
    if (!left--)
	return sprintf(page,
		       "\n"
		       " device statistics:\n"
		       " 4b5b:\n"
		       " crc_header_errors:\t\t%10u\n"
		       " framing_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
		       be32_to_cpu(fore200e->stats->phy.framing_errors));

    /* chunk 6: OC-3 statistics */
    if (!left--)
	return sprintf(page, "\n"
		       " OC-3:\n"
		       " section_bip8_errors:\t%10u\n"
		       " path_bip8_errors:\t\t%10u\n"
		       " line_bip24_errors:\t\t%10u\n"
		       " line_febe_errors:\t\t%10u\n"
		       " path_febe_errors:\t\t%10u\n"
		       " corr_hcs_errors:\t\t%10u\n"
		       " ucorr_hcs_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

    /* chunk 7: ATM layer cell counters */
    if (!left--)
	return sprintf(page,"\n"
		       " ATM:\t\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " vpi out of range:\t\t%10u\n"
		       " vpi no conn:\t\t%10u\n"
		       " vci out of range:\t\t%10u\n"
		       " vci no conn:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
		       be32_to_cpu(fore200e->stats->atm.cells_received),
		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));

    /* chunk 8: AAL0 counters */
    if (!left--)
	return sprintf(page,"\n"
		       " AAL0:\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal0.cells_received),
		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));

    /* chunk 9: AAL3/4 SAR and CS sublayer counters */
    if (!left--)
	return sprintf(page,"\n"
		       " AAL3/4:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n\n"
		       " CS  sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cells_received),
		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

    /* chunk 10: AAL5 SAR and CS sublayer counters */
    if (!left--)
	return sprintf(page,"\n"
		       " AAL5:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " congestions:\t\t%10u\n\n"
		       " CS  sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cells_received),
		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

    /* chunk 11: auxiliary allocation-failure counters (tx_sat is a
       host-side saturation counter, hence no byte swap) */
    if (!left--)
	return sprintf(page,"\n"
		       " AUX:\t\t allocation failures\n"
		       " small b1:\t\t\t%10u\n"
		       " large b1:\t\t\t%10u\n"
		       " small b2:\t\t\t%10u\n"
		       " large b2:\t\t\t%10u\n"
		       " RX PDUs:\t\t\t%10u\n"
		       " TX PDUs:\t\t\t%10lu\n",
		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
		       fore200e->tx_sat);

    /* chunk 12: carrier state */
    if (!left--)
	return sprintf(page,"\n"
		       " receive carrier:\t\t\t%s\n",
		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");

    /* chunk 13: header of the per-VCC table */
    if (!left--) {
	return sprintf(page,"\n"
		       " VCCs:\n  address   VPI VCI   AAL "
		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
    }

    /* remaining chunks: one line per ready VCC; the NULL check is
       repeated under q_lock since the map may change concurrently */
    for (i = 0; i < NBR_CONNECT; i++) {

	vcc = fore200e->vc_map[i].vcc;

	if (vcc == NULL)
	    continue;

	spin_lock_irqsave(&fore200e->q_lock, flags);

	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

	    fore200e_vcc = FORE200E_VCC(vcc);
	    ASSERT(fore200e_vcc);

	    len = sprintf(page,
			  " %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
			  vcc,
			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
			  fore200e_vcc->tx_pdu,
			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
			  fore200e_vcc->tx_max_pdu,
			  fore200e_vcc->rx_pdu,
			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
			  fore200e_vcc->rx_max_pdu);

	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
	    return len;
	}

	spin_unlock_irqrestore(&fore200e->q_lock, flags);
    }

    return 0;
}
3021
/* module entry / exit points */
module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
3024
3025
/* ATM device operations handed to atm_dev_register() in fore200e_register() */
static const struct atmdev_ops fore200e_ops = {
	.open       = fore200e_open,
	.close      = fore200e_close,
	.ioctl      = fore200e_ioctl,
	.getsockopt = fore200e_getsockopt,
	.setsockopt = fore200e_setsockopt,
	.send       = fore200e_send,
	.change_qos = fore200e_change_qos,
	.proc_read  = fore200e_proc_read,
	.owner      = THIS_MODULE
};
3037
3038MODULE_LICENSE("GPL");
3039#ifdef CONFIG_PCI
3040#ifdef __LITTLE_ENDIAN__
3041MODULE_FIRMWARE("pca200e.bin");
3042#else
3043MODULE_FIRMWARE("pca200e_ecd.bin2");
3044#endif
3045#endif /* CONFIG_PCI */
3046#ifdef CONFIG_SBUS
3047MODULE_FIRMWARE("sba200e_ecd.bin2");
3048#endif
3049