1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *
4 * hfcpci.c low level driver for CCD's hfc-pci based cards
5 *
6 * Author Werner Cornelius (werner@isdn4linux.de)
7 * based on existing driver for CCD hfc ISA cards
8 * type approval valid for HFC-S PCI A based card
9 *
10 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
11 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
12 *
13 * Module options:
14 *
15 * debug:
 * NOTE: only one debug value must be given for all cards
17 * See hfc_pci.h for debug flags.
18 *
19 * poll:
20 * NOTE: only one poll value must be given for all cards
21 * Give the number of samples for each fifo process.
22 * By default 128 is used. Decrease to reduce delay, increase to
23 * reduce cpu load. If unsure, don't mess with it!
24 * A value of 128 will use controller's interrupt. Other values will
25 * use kernel timer, because the controller will not allow lower values
26 * than 128.
27 * Also note that the value depends on the kernel timer frequency.
28 * If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
29 * If the kernel uses 100 Hz, steps of 80 samples are possible.
30 * If the kernel uses 300 Hz, steps of about 26 samples are possible.
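 * (The step size is simply 8000 samples/s divided by the kernel HZ value.)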
31 */
32
33#include <linux/interrupt.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/mISDNhw.h>
38#include <linux/slab.h>
39
40#include "hfc_pci.h"
41
42static const char *hfcpci_revision = "2.0";
43
44static int HFC_cnt;
45static uint debug;
46static uint poll, tics;
47static struct timer_list hfc_tl;
48static unsigned long hfc_jiffies;
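/*
 * hfc_tl/hfc_jiffies drive the software polling mode used when 'poll'
 * differs from the controller's native 128-sample period (see the module
 * option note above); with poll == 128 the chip's own timer IRQ is used.
 */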
49
50MODULE_AUTHOR("Karsten Keil");
51MODULE_LICENSE("GPL");
52module_param(debug, uint, S_IRUGO | S_IWUSR);
53module_param(poll, uint, S_IRUGO | S_IWUSR);
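/*
 * Typical usage (assuming the module is built as 'hfcpci'):
 *	modprobe hfcpci poll=128 debug=0
 * Because of S_IRUGO | S_IWUSR both parameters are also visible and
 * writable under /sys/module/hfcpci/parameters/.
 */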
54
55enum {
56 HFC_CCD_2BD0,
57 HFC_CCD_B000,
58 HFC_CCD_B006,
59 HFC_CCD_B007,
60 HFC_CCD_B008,
61 HFC_CCD_B009,
62 HFC_CCD_B00A,
63 HFC_CCD_B00B,
64 HFC_CCD_B00C,
65 HFC_CCD_B100,
66 HFC_CCD_B700,
67 HFC_CCD_B701,
68 HFC_ASUS_0675,
69 HFC_BERKOM_A1T,
70 HFC_BERKOM_TCONCEPT,
71 HFC_ANIGMA_MC145575,
72 HFC_ZOLTRIX_2BD0,
73 HFC_DIGI_DF_M_IOM2_E,
74 HFC_DIGI_DF_M_E,
75 HFC_DIGI_DF_M_IOM2_A,
76 HFC_DIGI_DF_M_A,
77 HFC_ABOCOM_2BD1,
78 HFC_SITECOM_DC105V2,
79};
80
81struct hfcPCI_hw {
82 unsigned char cirm;
83 unsigned char ctmt;
84 unsigned char clkdel;
85 unsigned char states;
86 unsigned char conn;
87 unsigned char mst_m;
88 unsigned char int_m1;
89 unsigned char int_m2;
90 unsigned char sctrl;
91 unsigned char sctrl_r;
92 unsigned char sctrl_e;
93 unsigned char trm;
94 unsigned char fifo_en;
95 unsigned char bswapped;
96 unsigned char protocol;
97 int nt_timer;
98 unsigned char __iomem *pci_io; /* start of PCI IO memory */
99 dma_addr_t dmahandle;
100 void *fifos; /* FIFO memory */
101 int last_bfifo_cnt[2];
102 /* marker saving last b-fifo frame count */
103 struct timer_list timer;
104};
105
106#define HFC_CFG_MASTER 1
107#define HFC_CFG_SLAVE 2
108#define HFC_CFG_PCM 3
109#define HFC_CFG_2HFC 4
110#define HFC_CFG_SLAVEHFC 5
111#define HFC_CFG_NEG_F0 6
112#define HFC_CFG_SW_DD_DU 7
113
114#define FLG_HFC_TIMER_T1 16
115#define FLG_HFC_TIMER_T3 17
116
117#define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
118#define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
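/* with the 3.125 ms auto-timer tick: 1120 * 3.125 ms = 3.5 s, 31 * 3.125 ms ~ 97 ms */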
119#define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
120#define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
121
122
123struct hfc_pci {
124 u_char subtype;
125 u_char chanlimit;
126 u_char initdone;
127 u_long cfg;
128 u_int irq;
129 u_int irqcnt;
130 struct pci_dev *pdev;
131 struct hfcPCI_hw hw;
132 spinlock_t lock; /* card lock */
133 struct dchannel dch;
134 struct bchannel bch[2];
135};
136
137/* Interface functions */
138static void
139enable_hwirq(struct hfc_pci *hc)
140{
141 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
142 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
143}
144
145static void
146disable_hwirq(struct hfc_pci *hc)
147{
148 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
149 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
150}
151
152/*
153 * free hardware resources used by driver
154 */
155static void
156release_io_hfcpci(struct hfc_pci *hc)
157{
158 /* disable memory mapped ports + busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
	del_timer(&hc->hw.timer);
	dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
			  hc->hw.dmahandle);
	iounmap(hc->hw.pci_io);
164}
165
166/*
167 * set mode (NT or TE)
168 */
169static void
170hfcpci_setmode(struct hfc_pci *hc)
171{
172 if (hc->hw.protocol == ISDN_P_NT_S0) {
173 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
174 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
175 hc->hw.states = 1; /* G1 */
176 } else {
177 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
178 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
179 hc->hw.states = 2; /* F2 */
180 }
181 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
182 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
183 udelay(10);
184 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
185 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
186}
187
188/*
189 * function called to reset the HFC PCI chip. A complete software reset of chip
190 * and fifos is done.
191 */
192static void
193reset_hfcpci(struct hfc_pci *hc)
194{
195 u_char val;
196 int cnt = 0;
197
198 printk(KERN_DEBUG "reset_hfcpci: entered\n");
199 val = Read_hfc(hc, HFCPCI_CHIP_ID);
200 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
201 /* enable memory mapped ports, disable busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
	disable_hwirq(hc);
	/* enable memory ports + busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND,
			      PCI_ENA_MEMIO + PCI_ENA_MASTER);
207 val = Read_hfc(hc, HFCPCI_STATUS);
208 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
209 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
210 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
211 set_current_state(TASK_UNINTERRUPTIBLE);
212 mdelay(10); /* Timeout 10ms */
213 hc->hw.cirm = 0; /* Reset Off */
214 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
215 val = Read_hfc(hc, HFCPCI_STATUS);
216 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
217 while (cnt < 50000) { /* max 50000 us */
218 udelay(5);
219 cnt += 5;
220 val = Read_hfc(hc, HFCPCI_STATUS);
221 if (!(val & 2))
222 break;
223 }
224 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
225
226 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
227
228 hc->hw.bswapped = 0; /* no exchange */
229 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
	hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
231 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
232 hc->hw.sctrl_r = 0;
233 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
234 hc->hw.mst_m = 0;
235 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
236 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
237 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
238 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
239 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
240 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
241 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
242 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
243
244 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
245 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
246 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
247
248 /* Clear already pending ints */
249 val = Read_hfc(hc, HFCPCI_INT_S1);
250
251 /* set NT/TE mode */
252 hfcpci_setmode(hc);
253
254 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
255 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
256
257 /*
258 * Init GCI/IOM2 in master mode
259 * Slots 0 and 1 are set for B-chan 1 and 2
260 * D- and monitor/CI channel are not enabled
261 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
262 * STIO2 is used as data input, B1+B2 from IOM->ST
263 * ST B-channel send disabled -> continuous 1s
264 * The IOM slots are always enabled
265 */
266 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
267 /* set data flow directions: connect B1,B2: HFC to/from PCM */
268 hc->hw.conn = 0x09;
269 } else {
270 hc->hw.conn = 0x36; /* set data flow directions */
271 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
272 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
273 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
274 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
275 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
276 } else {
277 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
278 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
279 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
280 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
281 }
282 }
283 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
284 val = Read_hfc(hc, HFCPCI_INT_S2);
285}
286
287/*
288 * Timer function called when kernel timer expires
289 */
290static void
291hfcpci_Timer(struct timer_list *t)
292{
293 struct hfc_pci *hc = from_timer(hc, t, hw.timer);
294 hc->hw.timer.expires = jiffies + 75;
295 /* WD RESET */
296/*
297 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
298 * add_timer(&hc->hw.timer);
299 */
300}
301
302
303/*
 * select the B-channel that is active and matches the given channel mask
305 */
306static struct bchannel *
307Sel_BCS(struct hfc_pci *hc, int channel)
308{
309 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
310 (hc->bch[0].nr & channel))
311 return &hc->bch[0];
312 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
313 (hc->bch[1].nr & channel))
314 return &hc->bch[1];
315 else
316 return NULL;
317}
318
319/*
320 * clear the desired B-channel rx fifo
321 */
322static void
323hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
324{
325 u_char fifo_state;
326 struct bzfifo *bzr;
327
328 if (fifo) {
329 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
330 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
331 } else {
332 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
333 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
334 }
335 if (fifo_state)
336 hc->hw.fifo_en ^= fifo_state;
337 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
338 hc->hw.last_bfifo_cnt[fifo] = 0;
339 bzr->f1 = MAX_B_FRAMES;
340 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
341 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
342 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
343 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
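	/* F1 == F2 and Z1 == Z2 leaves the FIFO empty: no frames, no data */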
344 if (fifo_state)
345 hc->hw.fifo_en |= fifo_state;
346 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
347}
348
349/*
350 * clear the desired B-channel tx fifo
351 */
352static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
353{
354 u_char fifo_state;
355 struct bzfifo *bzt;
356
357 if (fifo) {
358 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
359 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
360 } else {
361 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
362 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
363 }
364 if (fifo_state)
365 hc->hw.fifo_en ^= fifo_state;
366 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
367 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
368 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
369 "z1(%x) z2(%x) state(%x)\n",
370 fifo, bzt->f1, bzt->f2,
371 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
372 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
373 fifo_state);
374 bzt->f2 = MAX_B_FRAMES;
375 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
376 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
377 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
378 if (fifo_state)
379 hc->hw.fifo_en |= fifo_state;
380 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
381 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
382 printk(KERN_DEBUG
383 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
384 fifo, bzt->f1, bzt->f2,
385 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
386 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
387}
388
389/*
390 * read a complete B-frame out of the buffer
391 */
392static void
393hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
394 u_char *bdata, int count)
395{
396 u_char *ptr, *ptr1, new_f2;
397 int maxlen, new_z2;
398 struct zt *zp;
399
400 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
401 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
402 zp = &bz->za[bz->f2]; /* point to Z-Regs */
403 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
404 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
405 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
406 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
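	/*
	 * count covers the whole stored frame: payload plus the 2 CRC bytes
	 * and the STAT byte; a nonzero STAT byte (read at Z1) marks a frame
	 * received with a bad CRC.
	 */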
407 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
408 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
409 if (bch->debug & DEBUG_HW)
410 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
411 "invalid length %d or crc\n", count);
412#ifdef ERROR_STATISTIC
413 bch->err_inv++;
414#endif
415 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
416 bz->f2 = new_f2; /* next buffer */
417 } else {
		bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
419 if (!bch->rx_skb) {
420 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
421 return;
422 }
423 count -= 3;
		ptr = skb_put(bch->rx_skb, count);
425
426 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
427 maxlen = count; /* complete transfer */
428 else
429 maxlen = B_FIFO_SIZE + B_SUB_VAL -
430 le16_to_cpu(zp->z2); /* maximum */
431
432 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
433 /* start of data */
434 memcpy(ptr, ptr1, maxlen); /* copy data */
435 count -= maxlen;
436
437 if (count) { /* rest remaining */
438 ptr += maxlen;
439 ptr1 = bdata; /* start of buffer */
440 memcpy(ptr, ptr1, count); /* rest */
441 }
442 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
443 bz->f2 = new_f2; /* next buffer */
444 recv_Bchannel(bch, MISDN_ID_ANY, false);
445 }
446}
447
448/*
449 * D-channel receive procedure
450 */
451static int
452receive_dmsg(struct hfc_pci *hc)
453{
454 struct dchannel *dch = &hc->dch;
455 int maxlen;
456 int rcnt, total;
457 int count = 5;
458 u_char *ptr, *ptr1;
459 struct dfifo *df;
460 struct zt *zp;
461
462 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
463 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
464 zp = &df->za[df->f2 & D_FREG_MASK];
465 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
466 if (rcnt < 0)
467 rcnt += D_FIFO_SIZE;
468 rcnt++;
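		/* Z1 - Z2 (mod fifo size) + 1 = frame length incl. CRC and STAT bytes */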
469 if (dch->debug & DEBUG_HW_DCHANNEL)
470 printk(KERN_DEBUG
471 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
472 df->f1, df->f2,
473 le16_to_cpu(zp->z1),
474 le16_to_cpu(zp->z2),
475 rcnt);
476
477 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
478 (df->data[le16_to_cpu(zp->z1)])) {
479 if (dch->debug & DEBUG_HW)
480 printk(KERN_DEBUG
481 "empty_fifo hfcpci packet inv. len "
482 "%d or crc %d\n",
483 rcnt,
484 df->data[le16_to_cpu(zp->z1)]);
485#ifdef ERROR_STATISTIC
486 cs->err_rx++;
487#endif
488 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
489 (MAX_D_FRAMES + 1); /* next buffer */
490 df->za[df->f2 & D_FREG_MASK].z2 =
491 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
492 (D_FIFO_SIZE - 1));
493 } else {
			dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
495 if (!dch->rx_skb) {
496 printk(KERN_WARNING
497 "HFC-PCI: D receive out of memory\n");
498 break;
499 }
500 total = rcnt;
501 rcnt -= 3;
			ptr = skb_put(dch->rx_skb, rcnt);
503
504 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
505 maxlen = rcnt; /* complete transfer */
506 else
507 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
508 /* maximum */
509
510 ptr1 = df->data + le16_to_cpu(zp->z2);
511 /* start of data */
512 memcpy(ptr, ptr1, maxlen); /* copy data */
513 rcnt -= maxlen;
514
515 if (rcnt) { /* rest remaining */
516 ptr += maxlen;
517 ptr1 = df->data; /* start of buffer */
518 memcpy(ptr, ptr1, rcnt); /* rest */
519 }
520 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
521 (MAX_D_FRAMES + 1); /* next buffer */
522 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
523 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
524 recv_Dchannel(dch);
525 }
526 }
527 return 1;
528}
529
530/*
531 * check for transparent receive data and read max one 'poll' size if avail
532 */
533static void
534hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
535 struct bzfifo *txbz, u_char *bdata)
536{
537 __le16 *z1r, *z2r, *z1t, *z2t;
538 int new_z2, fcnt_rx, fcnt_tx, maxlen;
539 u_char *ptr, *ptr1;
540
541 z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
542 z2r = z1r + 1;
543 z1t = &txbz->za[MAX_B_FRAMES].z1;
544 z2t = z1t + 1;
545
546 fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
547 if (!fcnt_rx)
548 return; /* no data avail */
549
550 if (fcnt_rx <= 0)
551 fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
552 new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
553 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
554 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
555
556 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
557 if (fcnt_tx <= 0)
558 fcnt_tx += B_FIFO_SIZE;
559 /* fcnt_tx contains available bytes in tx-fifo */
560 fcnt_tx = B_FIFO_SIZE - fcnt_tx;
561 /* remaining bytes to send (bytes in tx-fifo) */
562
563 if (test_bit(FLG_RX_OFF, &bch->Flags)) {
564 bch->dropcnt += fcnt_rx;
565 *z2r = cpu_to_le16(new_z2);
566 return;
567 }
568 maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
569 if (maxlen < 0) {
570 pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
571 } else {
		ptr = skb_put(bch->rx_skb, fcnt_rx);
573 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
574 maxlen = fcnt_rx; /* complete transfer */
575 else
576 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
577 /* maximum */
578
579 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
580 /* start of data */
581 memcpy(ptr, ptr1, maxlen); /* copy data */
582 fcnt_rx -= maxlen;
583
584 if (fcnt_rx) { /* rest remaining */
585 ptr += maxlen;
586 ptr1 = bdata; /* start of buffer */
587 memcpy(ptr, ptr1, fcnt_rx); /* rest */
588 }
589 recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
590 }
591 *z2r = cpu_to_le16(new_z2); /* new position */
592}
593
594/*
595 * B-channel main receive routine
596 */
597static void
598main_rec_hfcpci(struct bchannel *bch)
599{
600 struct hfc_pci *hc = bch->hw;
601 int rcnt, real_fifo;
602 int receive = 0, count = 5;
603 struct bzfifo *txbz, *rxbz;
604 u_char *bdata;
605 struct zt *zp;
606
607 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
608 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
609 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
610 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
611 real_fifo = 1;
612 } else {
613 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
614 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
615 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
616 real_fifo = 0;
617 }
618Begin:
619 count--;
620 if (rxbz->f1 != rxbz->f2) {
621 if (bch->debug & DEBUG_HW_BCHANNEL)
622 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
623 bch->nr, rxbz->f1, rxbz->f2);
624 zp = &rxbz->za[rxbz->f2];
625
626 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
627 if (rcnt < 0)
628 rcnt += B_FIFO_SIZE;
629 rcnt++;
630 if (bch->debug & DEBUG_HW_BCHANNEL)
631 printk(KERN_DEBUG
632 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
633 bch->nr, le16_to_cpu(zp->z1),
634 le16_to_cpu(zp->z2), rcnt);
		hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
636 rcnt = rxbz->f1 - rxbz->f2;
637 if (rcnt < 0)
638 rcnt += MAX_B_FRAMES + 1;
639 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
640 rcnt = 0;
			hfcpci_clear_fifo_rx(hc, real_fifo);
642 }
643 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
644 if (rcnt > 1)
645 receive = 1;
646 else
647 receive = 0;
648 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
649 hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
650 return;
651 } else
652 receive = 0;
653 if (count && receive)
654 goto Begin;
655
656}
657
658/*
659 * D-channel send routine
660 */
661static void
662hfcpci_fill_dfifo(struct hfc_pci *hc)
663{
664 struct dchannel *dch = &hc->dch;
665 int fcnt;
666 int count, new_z1, maxlen;
667 struct dfifo *df;
668 u_char *src, *dst, new_f1;
669
670 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
671 printk(KERN_DEBUG "%s\n", __func__);
672
673 if (!dch->tx_skb)
674 return;
675 count = dch->tx_skb->len - dch->tx_idx;
676 if (count <= 0)
677 return;
678 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
679
680 if (dch->debug & DEBUG_HW_DFIFO)
681 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
682 df->f1, df->f2,
683 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
684 fcnt = df->f1 - df->f2; /* frame count actually buffered */
685 if (fcnt < 0)
686 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
687 if (fcnt > (MAX_D_FRAMES - 1)) {
688 if (dch->debug & DEBUG_HW_DCHANNEL)
689 printk(KERN_DEBUG
			       "hfcpci_fill_Dfifo more than 14 frames\n");
691#ifdef ERROR_STATISTIC
692 cs->err_tx++;
693#endif
694 return;
695 }
696 /* now determine free bytes in FIFO buffer */
697 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
698 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
699 if (maxlen <= 0)
700 maxlen += D_FIFO_SIZE; /* count now contains available bytes */
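	/* i.e. free bytes = (Z2(F2) - Z1(F1) - 1) mod D_FIFO_SIZE */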
701
702 if (dch->debug & DEBUG_HW_DCHANNEL)
703 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
704 count, maxlen);
705 if (count > maxlen) {
706 if (dch->debug & DEBUG_HW_DCHANNEL)
707 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
708 return;
709 }
710 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
711 (D_FIFO_SIZE - 1);
712 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
713 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
714 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
715 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
716 /* end fifo */
717 if (maxlen > count)
718 maxlen = count; /* limit size */
719 memcpy(dst, src, maxlen); /* first copy */
720
721 count -= maxlen; /* remaining bytes */
722 if (count) {
723 dst = df->data; /* start of buffer */
724 src += maxlen; /* new position */
725 memcpy(dst, src, count);
726 }
727 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
728 /* for next buffer */
729 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
730 /* new pos actual buffer */
731 df->f1 = new_f1; /* next frame */
732 dch->tx_idx = dch->tx_skb->len;
733}
734
735/*
736 * B-channel send routine
737 */
738static void
739hfcpci_fill_fifo(struct bchannel *bch)
740{
741 struct hfc_pci *hc = bch->hw;
742 int maxlen, fcnt;
743 int count, new_z1;
744 struct bzfifo *bz;
745 u_char *bdata;
746 u_char new_f1, *src, *dst;
747 __le16 *z1t, *z2t;
748
749 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
750 printk(KERN_DEBUG "%s\n", __func__);
751 if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
752 if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
753 !test_bit(FLG_TRANSPARENT, &bch->Flags))
754 return;
755 count = HFCPCI_FILLEMPTY;
756 } else {
757 count = bch->tx_skb->len - bch->tx_idx;
758 }
759 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
760 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
761 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
762 } else {
763 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
764 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
765 }
766
767 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
768 z1t = &bz->za[MAX_B_FRAMES].z1;
769 z2t = z1t + 1;
770 if (bch->debug & DEBUG_HW_BCHANNEL)
771 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
772 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
773 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
774 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
775 if (fcnt <= 0)
776 fcnt += B_FIFO_SIZE;
777 if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
778 /* fcnt contains available bytes in fifo */
779 if (count > fcnt)
780 count = fcnt;
781 new_z1 = le16_to_cpu(*z1t) + count;
782 /* new buffer Position */
783 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
784 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
785 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
786 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
787 /* end of fifo */
788 if (bch->debug & DEBUG_HW_BFIFO)
789 printk(KERN_DEBUG "hfcpci_FFt fillempty "
790 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
791 fcnt, maxlen, new_z1, dst);
792 if (maxlen > count)
793 maxlen = count; /* limit size */
794 memset(dst, bch->fill[0], maxlen); /* first copy */
795 count -= maxlen; /* remaining bytes */
796 if (count) {
797 dst = bdata; /* start of buffer */
798 memset(dst, bch->fill[0], count);
799 }
800 *z1t = cpu_to_le16(new_z1); /* now send data */
801 return;
802 }
803 /* fcnt contains available bytes in fifo */
804 fcnt = B_FIFO_SIZE - fcnt;
805 /* remaining bytes to send (bytes in fifo) */
806
807 next_t_frame:
808 count = bch->tx_skb->len - bch->tx_idx;
809 /* maximum fill shall be poll*2 */
810 if (count > (poll << 1) - fcnt)
811 count = (poll << 1) - fcnt;
812 if (count <= 0)
813 return;
814 /* data is suitable for fifo */
815 new_z1 = le16_to_cpu(*z1t) + count;
816 /* new buffer Position */
817 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
818 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
819 src = bch->tx_skb->data + bch->tx_idx;
820 /* source pointer */
821 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
822 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
823 /* end of fifo */
824 if (bch->debug & DEBUG_HW_BFIFO)
825 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
826 "maxl(%d) nz1(%x) dst(%p)\n",
827 fcnt, maxlen, new_z1, dst);
828 fcnt += count;
829 bch->tx_idx += count;
830 if (maxlen > count)
831 maxlen = count; /* limit size */
832 memcpy(dst, src, maxlen); /* first copy */
833 count -= maxlen; /* remaining bytes */
834 if (count) {
835 dst = bdata; /* start of buffer */
836 src += maxlen; /* new position */
837 memcpy(dst, src, count);
838 }
839 *z1t = cpu_to_le16(new_z1); /* now send data */
840 if (bch->tx_idx < bch->tx_skb->len)
841 return;
		dev_kfree_skb_any(bch->tx_skb);
843 if (get_next_bframe(bch))
844 goto next_t_frame;
845 return;
846 }
847 if (bch->debug & DEBUG_HW_BCHANNEL)
848 printk(KERN_DEBUG
849 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
850 __func__, bch->nr, bz->f1, bz->f2,
851 bz->za[bz->f1].z1);
852 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
853 if (fcnt < 0)
854 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
855 if (fcnt > (MAX_B_FRAMES - 1)) {
856 if (bch->debug & DEBUG_HW_BCHANNEL)
857 printk(KERN_DEBUG
			       "hfcpci_fill_Bfifo more than 14 frames\n");
859 return;
860 }
861 /* now determine free bytes in FIFO buffer */
862 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
863 le16_to_cpu(bz->za[bz->f1].z1) - 1;
864 if (maxlen <= 0)
865 maxlen += B_FIFO_SIZE; /* count now contains available bytes */
866
867 if (bch->debug & DEBUG_HW_BCHANNEL)
868 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
869 bch->nr, count, maxlen);
870
871 if (maxlen < count) {
872 if (bch->debug & DEBUG_HW_BCHANNEL)
873 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
874 return;
875 }
876 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
877 /* new buffer Position */
878 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
879 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
880
881 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
882 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
883 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
884 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
885 /* end fifo */
886 if (maxlen > count)
887 maxlen = count; /* limit size */
888 memcpy(dst, src, maxlen); /* first copy */
889
890 count -= maxlen; /* remaining bytes */
891 if (count) {
892 dst = bdata; /* start of buffer */
893 src += maxlen; /* new position */
894 memcpy(dst, src, count);
895 }
896 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
897 bz->f1 = new_f1; /* next frame */
	dev_kfree_skb_any(bch->tx_skb);
899 get_next_bframe(bch);
900}
901
902
903
904/*
905 * handle L1 state changes TE
906 */
907
908static void
909ph_state_te(struct dchannel *dch)
910{
911 if (dch->debug)
912 printk(KERN_DEBUG "%s: TE newstate %x\n",
913 __func__, dch->state);
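	/*
	 * dch->state carries the S/T TE state number: F3 deactivated (3),
	 * F6 synchronized (6), F7 activated (7); 0 forces a reset, 5 and 8
	 * are reported as "any signal" / lost framing.
	 */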
914 switch (dch->state) {
915 case 0:
916 l1_event(dch->l1, HW_RESET_IND);
917 break;
918 case 3:
919 l1_event(dch->l1, HW_DEACT_IND);
920 break;
921 case 5:
922 case 8:
923 l1_event(dch->l1, ANYSIGNAL);
924 break;
925 case 6:
926 l1_event(dch->l1, INFO2);
927 break;
928 case 7:
929 l1_event(dch->l1, INFO4_P8);
930 break;
931 }
932}
933
934/*
935 * handle L1 state changes NT
936 */
937
938static void
939handle_nt_timer3(struct dchannel *dch) {
940 struct hfc_pci *hc = dch->hw;
941
	test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
943 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
944 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
945 hc->hw.nt_timer = 0;
	test_and_set_bit(FLG_ACTIVE, &dch->Flags);
947 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
948 hc->hw.mst_m |= HFCPCI_MASTER;
949 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
	_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
		    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
952}
953
954static void
955ph_state_nt(struct dchannel *dch)
956{
957 struct hfc_pci *hc = dch->hw;
958
959 if (dch->debug)
960 printk(KERN_DEBUG "%s: NT newstate %x\n",
961 __func__, dch->state);
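	/*
	 * NT side G states: G1 deactivated (1), G2 pending activation (2),
	 * G3 activated (3), G4 pending deactivation (4).
	 */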
962 switch (dch->state) {
963 case 2:
964 if (hc->hw.nt_timer < 0) {
965 hc->hw.nt_timer = 0;
			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
968 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
969 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
970 /* Clear already pending ints */
971 (void) Read_hfc(hc, HFCPCI_INT_S1);
972 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
973 udelay(10);
974 Write_hfc(hc, HFCPCI_STATES, 4);
975 dch->state = 4;
976 } else if (hc->hw.nt_timer == 0) {
977 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
978 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
979 hc->hw.nt_timer = NT_T1_COUNT;
980 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
981 hc->hw.ctmt |= HFCPCI_TIM3_125;
982 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
983 HFCPCI_CLTIMER);
			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
			test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
986 /* allow G2 -> G3 transition */
987 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
988 } else {
989 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
990 }
991 break;
992 case 1:
993 hc->hw.nt_timer = 0;
		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
999 hc->hw.mst_m &= ~HFCPCI_MASTER;
1000 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
		_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
			    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1004 break;
1005 case 4:
1006 hc->hw.nt_timer = 0;
		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1009 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1010 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1011 break;
1012 case 3:
		if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
			if (!test_and_clear_bit(FLG_L2_ACTIVATED,
						&dch->Flags)) {
1016 handle_nt_timer3(dch);
1017 break;
1018 }
			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1020 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1021 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1022 hc->hw.nt_timer = NT_T3_COUNT;
1023 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1024 hc->hw.ctmt |= HFCPCI_TIM3_125;
1025 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1026 HFCPCI_CLTIMER);
1027 }
1028 break;
1029 }
1030}
1031
1032static void
1033ph_state(struct dchannel *dch)
1034{
1035 struct hfc_pci *hc = dch->hw;
1036
1037 if (hc->hw.protocol == ISDN_P_NT_S0) {
1038 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1039 hc->hw.nt_timer < 0)
1040 handle_nt_timer3(dch);
1041 else
1042 ph_state_nt(dch);
1043 } else
1044 ph_state_te(dch);
1045}
1046
1047/*
1048 * Layer 1 callback function
1049 */
1050static int
1051hfc_l1callback(struct dchannel *dch, u_int cmd)
1052{
1053 struct hfc_pci *hc = dch->hw;
1054
1055 switch (cmd) {
1056 case INFO3_P8:
1057 case INFO3_P10:
1058 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1059 hc->hw.mst_m |= HFCPCI_MASTER;
1060 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1061 break;
1062 case HW_RESET_REQ:
1063 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1064 /* HFC ST 3 */
1065 udelay(6);
1066 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
1067 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1068 hc->hw.mst_m |= HFCPCI_MASTER;
1069 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1070 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1071 HFCPCI_DO_ACTION);
1072 l1_event(dch->l1, HW_POWERUP_IND);
1073 break;
1074 case HW_DEACT_REQ:
1075 hc->hw.mst_m &= ~HFCPCI_MASTER;
1076 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
		skb_queue_purge(&dch->squeue);
1078 if (dch->tx_skb) {
1079 dev_kfree_skb(dch->tx_skb);
1080 dch->tx_skb = NULL;
1081 }
1082 dch->tx_idx = 0;
1083 if (dch->rx_skb) {
1084 dev_kfree_skb(dch->rx_skb);
1085 dch->rx_skb = NULL;
1086 }
		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
		if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
			del_timer(&dch->timer);
1090 break;
1091 case HW_POWERUP_REQ:
1092 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1093 break;
1094 case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
			    GFP_ATOMIC);
1098 break;
1099 case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
			    GFP_ATOMIC);
1103 break;
1104 default:
1105 if (dch->debug & DEBUG_HW)
1106 printk(KERN_DEBUG "%s: unknown command %x\n",
1107 __func__, cmd);
1108 return -1;
1109 }
1110 return 0;
1111}
1112
1113/*
1114 * Interrupt handler
1115 */
1116static inline void
1117tx_birq(struct bchannel *bch)
1118{
1119 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1120 hfcpci_fill_fifo(bch);
1121 else {
		dev_kfree_skb_any(bch->tx_skb);
1123 if (get_next_bframe(bch))
1124 hfcpci_fill_fifo(bch);
1125 }
1126}
1127
1128static inline void
1129tx_dirq(struct dchannel *dch)
1130{
1131 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
		hfcpci_fill_dfifo(dch->hw);
1133 else {
1134 dev_kfree_skb(dch->tx_skb);
1135 if (get_next_dframe(dch))
			hfcpci_fill_dfifo(dch->hw);
1137 }
1138}
1139
1140static irqreturn_t
1141hfcpci_int(int intno, void *dev_id)
1142{
1143 struct hfc_pci *hc = dev_id;
1144 u_char exval;
1145 struct bchannel *bch;
1146 u_char val, stat;
1147
	spin_lock(&hc->lock);
	if (!(hc->hw.int_m2 & 0x08)) {
		spin_unlock(&hc->lock);
1151 return IRQ_NONE; /* not initialised */
1152 }
1153 stat = Read_hfc(hc, HFCPCI_STATUS);
1154 if (HFCPCI_ANYINT & stat) {
1155 val = Read_hfc(hc, HFCPCI_INT_S1);
1156 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1157 printk(KERN_DEBUG
1158 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1159 } else {
1160 /* shared */
		spin_unlock(&hc->lock);
1162 return IRQ_NONE;
1163 }
1164 hc->irqcnt++;
1165
1166 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1167 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1168 val &= hc->hw.int_m1;
1169 if (val & 0x40) { /* state machine irq */
1170 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1171 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1172 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1173 hc->dch.state, exval);
1174 hc->dch.state = exval;
1175 schedule_event(&hc->dch, FLG_PHCHANGE);
1176 val &= ~0x40;
1177 }
1178 if (val & 0x80) { /* timer irq */
1179 if (hc->hw.protocol == ISDN_P_NT_S0) {
1180 if ((--hc->hw.nt_timer) < 0)
1181 schedule_event(&hc->dch, FLG_PHCHANGE);
1182 }
1183 val &= ~0x80;
1184 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1185 }
1186 if (val & 0x08) { /* B1 rx */
		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1188 if (bch)
1189 main_rec_hfcpci(bch);
1190 else if (hc->dch.debug)
1191 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1192 }
1193 if (val & 0x10) { /* B2 rx */
		bch = Sel_BCS(hc, 2);
1195 if (bch)
1196 main_rec_hfcpci(bch);
1197 else if (hc->dch.debug)
1198 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1199 }
1200 if (val & 0x01) { /* B1 tx */
		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1202 if (bch)
1203 tx_birq(bch);
1204 else if (hc->dch.debug)
1205 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1206 }
1207 if (val & 0x02) { /* B2 tx */
		bch = Sel_BCS(hc, 2);
1209 if (bch)
1210 tx_birq(bch);
1211 else if (hc->dch.debug)
1212 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1213 }
1214 if (val & 0x20) /* D rx */
1215 receive_dmsg(hc);
1216 if (val & 0x04) { /* D tx */
		if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
			del_timer(&hc->dch.timer);
		tx_dirq(&hc->dch);
1220 }
	spin_unlock(&hc->lock);
1222 return IRQ_HANDLED;
1223}
1224
1225/*
 * timer callback for D-channel busy resolution; currently a no-op
1227 */
1228static void
1229hfcpci_dbusy_timer(struct timer_list *t)
1230{
1231}
1232
1233/*
1234 * activate/deactivate hardware for selected channels and mode
1235 */
1236static int
1237mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1238{
1239 struct hfc_pci *hc = bch->hw;
1240 int fifo2;
1241 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1242
1243 if (bch->debug & DEBUG_HW_BCHANNEL)
1244 printk(KERN_DEBUG
1245 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1246 bch->state, protocol, bch->nr, bc);
1247
1248 fifo2 = bc;
1249 pcm_mode = (bc >> 24) & 0xff;
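	/*
	 * bc packs the request: bits 0-7 B-channel mask, bits 8-15 PCM rx
	 * slot, bits 16-23 PCM tx slot, bits 24-31 nonzero when a PCM slot
	 * assignment is requested.
	 */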
1250 if (pcm_mode) { /* PCM SLOT USE */
1251 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1252 printk(KERN_WARNING
1253 "%s: pcm channel id without HFC_CFG_PCM\n",
1254 __func__);
1255 rx_slot = (bc >> 8) & 0xff;
1256 tx_slot = (bc >> 16) & 0xff;
1257 bc = bc & 0xff;
1258 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1259 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1260 __func__);
1261 if (hc->chanlimit > 1) {
1262 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1263 hc->hw.sctrl_e &= ~0x80;
1264 } else {
1265 if (bc & 2) {
1266 if (protocol != ISDN_P_NONE) {
1267 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1268 hc->hw.sctrl_e |= 0x80;
1269 } else {
1270 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1271 hc->hw.sctrl_e &= ~0x80;
1272 }
1273 fifo2 = 1;
1274 } else {
1275 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1276 hc->hw.sctrl_e &= ~0x80;
1277 }
1278 }
1279 switch (protocol) {
1280 case (-1): /* used for init */
1281 bch->state = -1;
1282 bch->nr = bc;
1283 fallthrough;
1284 case (ISDN_P_NONE):
1285 if (bch->state == ISDN_P_NONE)
1286 return 0;
1287 if (bc & 2) {
1288 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1289 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1290 } else {
1291 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1292 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1293 }
1294 if (fifo2 & 2) {
1295 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1296 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
1297 HFCPCI_INTS_B2REC);
1298 } else {
1299 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1300 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
1301 HFCPCI_INTS_B1REC);
1302 }
1303#ifdef REVERSE_BITORDER
1304 if (bch->nr & 2)
1305 hc->hw.cirm &= 0x7f;
1306 else
1307 hc->hw.cirm &= 0xbf;
1308#endif
1309 bch->state = ISDN_P_NONE;
1310 bch->nr = bc;
		test_and_clear_bit(FLG_HDLC, &bch->Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1313 break;
1314 case (ISDN_P_B_RAW):
1315 bch->state = protocol;
1316 bch->nr = bc;
		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1319 if (bc & 2) {
1320 hc->hw.sctrl |= SCTRL_B2_ENA;
1321 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1322#ifdef REVERSE_BITORDER
1323 hc->hw.cirm |= 0x80;
1324#endif
1325 } else {
1326 hc->hw.sctrl |= SCTRL_B1_ENA;
1327 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1328#ifdef REVERSE_BITORDER
1329 hc->hw.cirm |= 0x40;
1330#endif
1331 }
1332 if (fifo2 & 2) {
1333 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1334 if (!tics)
1335 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1336 HFCPCI_INTS_B2REC);
1337 hc->hw.ctmt |= 2;
1338 hc->hw.conn &= ~0x18;
1339 } else {
1340 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1341 if (!tics)
1342 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1343 HFCPCI_INTS_B1REC);
1344 hc->hw.ctmt |= 1;
1345 hc->hw.conn &= ~0x03;
1346 }
		test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1348 break;
1349 case (ISDN_P_B_HDLC):
1350 bch->state = protocol;
1351 bch->nr = bc;
		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1354 if (bc & 2) {
1355 hc->hw.sctrl |= SCTRL_B2_ENA;
1356 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1357 } else {
1358 hc->hw.sctrl |= SCTRL_B1_ENA;
1359 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1360 }
1361 if (fifo2 & 2) {
1362 hc->hw.last_bfifo_cnt[1] = 0;
1363 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1364 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1365 HFCPCI_INTS_B2REC);
1366 hc->hw.ctmt &= ~2;
1367 hc->hw.conn &= ~0x18;
1368 } else {
1369 hc->hw.last_bfifo_cnt[0] = 0;
1370 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1371 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1372 HFCPCI_INTS_B1REC);
1373 hc->hw.ctmt &= ~1;
1374 hc->hw.conn &= ~0x03;
1375 }
		test_and_set_bit(FLG_HDLC, &bch->Flags);
1377 break;
1378 default:
1379 printk(KERN_DEBUG "prot not known %x\n", protocol);
1380 return -ENOPROTOOPT;
1381 }
1382 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1383 if ((protocol == ISDN_P_NONE) ||
1384 (protocol == -1)) { /* init case */
1385 rx_slot = 0;
1386 tx_slot = 0;
1387 } else {
1388 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1389 rx_slot |= 0xC0;
1390 tx_slot |= 0xC0;
1391 } else {
1392 rx_slot |= 0x80;
1393 tx_slot |= 0x80;
1394 }
1395 }
1396 if (bc & 2) {
1397 hc->hw.conn &= 0xc7;
1398 hc->hw.conn |= 0x08;
1399 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1400 __func__, tx_slot);
1401 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1402 __func__, rx_slot);
1403 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1404 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1405 } else {
1406 hc->hw.conn &= 0xf8;
1407 hc->hw.conn |= 0x01;
1408 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1409 __func__, tx_slot);
1410 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1411 __func__, rx_slot);
1412 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1413 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1414 }
1415 }
1416 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1417 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1418 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1419 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1420 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1421 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1422 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1423#ifdef REVERSE_BITORDER
1424 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1425#endif
1426 return 0;
1427}
1428
1429static int
1430set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1431{
1432 struct hfc_pci *hc = bch->hw;
1433
1434 if (bch->debug & DEBUG_HW_BCHANNEL)
1435 printk(KERN_DEBUG
1436 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1437 bch->state, protocol, bch->nr, chan);
1438 if (bch->nr != chan) {
1439 printk(KERN_DEBUG
1440 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1441 bch->nr, chan);
1442 return -EINVAL;
1443 }
1444 switch (protocol) {
1445 case (ISDN_P_B_RAW):
1446 bch->state = protocol;
		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1448 if (chan & 2) {
1449 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1450 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1451 if (!tics)
1452 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1453 hc->hw.ctmt |= 2;
1454 hc->hw.conn &= ~0x18;
1455#ifdef REVERSE_BITORDER
1456 hc->hw.cirm |= 0x80;
1457#endif
1458 } else {
1459 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1460 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1461 if (!tics)
1462 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1463 hc->hw.ctmt |= 1;
1464 hc->hw.conn &= ~0x03;
1465#ifdef REVERSE_BITORDER
1466 hc->hw.cirm |= 0x40;
1467#endif
1468 }
1469 break;
1470 case (ISDN_P_B_HDLC):
1471 bch->state = protocol;
		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1473 if (chan & 2) {
1474 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1475 hc->hw.last_bfifo_cnt[1] = 0;
1476 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1477 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1478 hc->hw.ctmt &= ~2;
1479 hc->hw.conn &= ~0x18;
1480 } else {
1481 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1482 hc->hw.last_bfifo_cnt[0] = 0;
1483 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1484 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1485 hc->hw.ctmt &= ~1;
1486 hc->hw.conn &= ~0x03;
1487 }
1488 break;
1489 default:
1490 printk(KERN_DEBUG "prot not known %x\n", protocol);
1491 return -ENOPROTOOPT;
1492 }
1493 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1494 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1495 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1496 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1497 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1498#ifdef REVERSE_BITORDER
1499 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1500#endif
1501 return 0;
1502}
1503
1504static void
1505deactivate_bchannel(struct bchannel *bch)
1506{
1507 struct hfc_pci *hc = bch->hw;
1508 u_long flags;
1509
1510 spin_lock_irqsave(&hc->lock, flags);
1511 mISDN_clear_bchannel(bch);
	mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
	spin_unlock_irqrestore(&hc->lock, flags);
1514}
1515
1516/*
1517 * Layer 1 B-channel hardware access
1518 */
1519static int
1520channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1521{
1522 return mISDN_ctrl_bchannel(bch, cq);
1523}
1524static int
1525hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1526{
1527 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1528 struct hfc_pci *hc = bch->hw;
1529 int ret = -EINVAL;
1530 u_long flags;
1531
1532 if (bch->debug & DEBUG_HW)
1533 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1534 switch (cmd) {
1535 case HW_TESTRX_RAW:
1536 spin_lock_irqsave(&hc->lock, flags);
		ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
		spin_unlock_irqrestore(&hc->lock, flags);
1539 break;
1540 case HW_TESTRX_HDLC:
1541 spin_lock_irqsave(&hc->lock, flags);
		ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
		spin_unlock_irqrestore(&hc->lock, flags);
1544 break;
1545 case HW_TESTRX_OFF:
1546 spin_lock_irqsave(&hc->lock, flags);
		mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
		spin_unlock_irqrestore(&hc->lock, flags);
1549 ret = 0;
1550 break;
1551 case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
1553 deactivate_bchannel(bch);
1554 ch->protocol = ISDN_P_NONE;
1555 ch->peer = NULL;
1556 module_put(THIS_MODULE);
1557 ret = 0;
1558 break;
1559 case CONTROL_CHANNEL:
		ret = channel_bctrl(bch, arg);
1561 break;
1562 default:
1563 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1564 __func__, cmd);
1565 }
1566 return ret;
1567}
1568
1569/*
1570 * Layer2 -> Layer 1 Dchannel data
1571 */
1572static int
1573hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1574{
1575 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1576 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1577 struct hfc_pci *hc = dch->hw;
1578 int ret = -EINVAL;
1579 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1580 unsigned int id;
1581 u_long flags;
1582
1583 switch (hh->prim) {
1584 case PH_DATA_REQ:
1585 spin_lock_irqsave(&hc->lock, flags);
1586 ret = dchannel_senddata(dch, skb);
1587 if (ret > 0) { /* direct TX */
1588 id = hh->id; /* skb can be freed */
			hfcpci_fill_dfifo(dch->hw);
			ret = 0;
			spin_unlock_irqrestore(&hc->lock, flags);
1592 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1593 } else
			spin_unlock_irqrestore(&hc->lock, flags);
1595 return ret;
1596 case PH_ACTIVATE_REQ:
1597 spin_lock_irqsave(&hc->lock, flags);
1598 if (hc->hw.protocol == ISDN_P_NT_S0) {
1599 ret = 0;
1600 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1601 hc->hw.mst_m |= HFCPCI_MASTER;
1602 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1603 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
				spin_unlock_irqrestore(&hc->lock, flags);
				_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
					    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1607 break;
1608 }
			test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1610 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1611 HFCPCI_DO_ACTION | 1);
1612 } else
1613 ret = l1_event(dch->l1, hh->prim);
		spin_unlock_irqrestore(&hc->lock, flags);
1615 break;
1616 case PH_DEACTIVATE_REQ:
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1618 spin_lock_irqsave(&hc->lock, flags);
1619 if (hc->hw.protocol == ISDN_P_NT_S0) {
1620 struct sk_buff_head free_queue;
1621
			__skb_queue_head_init(&free_queue);
1623 /* prepare deactivation */
1624 Write_hfc(hc, HFCPCI_STATES, 0x40);
			skb_queue_splice_init(&dch->squeue, &free_queue);
1626 if (dch->tx_skb) {
				__skb_queue_tail(&free_queue, dch->tx_skb);
1628 dch->tx_skb = NULL;
1629 }
1630 dch->tx_idx = 0;
1631 if (dch->rx_skb) {
				__skb_queue_tail(&free_queue, dch->rx_skb);
1633 dch->rx_skb = NULL;
1634 }
			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
			if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
				del_timer(&dch->timer);
1638#ifdef FIXME
1639 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1640 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1641#endif
1642 hc->hw.mst_m &= ~HFCPCI_MASTER;
1643 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1644 ret = 0;
			spin_unlock_irqrestore(&hc->lock, flags);
			__skb_queue_purge(&free_queue);
1647 } else {
1648 ret = l1_event(dch->l1, hh->prim);
			spin_unlock_irqrestore(&hc->lock, flags);
1650 }
1651 break;
1652 }
1653 if (!ret)
1654 dev_kfree_skb(skb);
1655 return ret;
1656}
1657
1658/*
1659 * Layer2 -> Layer 1 Bchannel data
1660 */
1661static int
1662hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1663{
1664 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1665 struct hfc_pci *hc = bch->hw;
1666 int ret = -EINVAL;
1667 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1668 unsigned long flags;
1669
1670 switch (hh->prim) {
1671 case PH_DATA_REQ:
1672 spin_lock_irqsave(&hc->lock, flags);
1673 ret = bchannel_senddata(bch, skb);
1674 if (ret > 0) { /* direct TX */
1675 hfcpci_fill_fifo(bch);
1676 ret = 0;
1677 }
		spin_unlock_irqrestore(&hc->lock, flags);
1679 return ret;
1680 case PH_ACTIVATE_REQ:
1681 spin_lock_irqsave(&hc->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1684 else
1685 ret = 0;
		spin_unlock_irqrestore(&hc->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1689 NULL, GFP_KERNEL);
1690 break;
1691 case PH_DEACTIVATE_REQ:
1692 deactivate_bchannel(bch);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1694 NULL, GFP_KERNEL);
1695 ret = 0;
1696 break;
1697 }
1698 if (!ret)
1699 dev_kfree_skb(skb);
1700 return ret;
1701}
1702
1703/*
1704 * called for card init message
1705 */
1706
1707static void
1708inithfcpci(struct hfc_pci *hc)
1709{
1710 printk(KERN_DEBUG "inithfcpci: entered\n");
1711 timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
1712 hc->chanlimit = 2;
	mode_hfcpci(&hc->bch[0], 1, -1);
	mode_hfcpci(&hc->bch[1], 2, -1);
1715}
1716
1717
1718static int
1719init_card(struct hfc_pci *hc)
1720{
1721 int cnt = 3;
1722 u_long flags;
1723
1724 printk(KERN_DEBUG "init_card: entered\n");
1725
1726
1727 spin_lock_irqsave(&hc->lock, flags);
1728 disable_hwirq(hc);
	spin_unlock_irqrestore(&hc->lock, flags);
	if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1731 printk(KERN_WARNING
1732 "mISDN: couldn't get interrupt %d\n", hc->irq);
1733 return -EIO;
1734 }
1735 spin_lock_irqsave(&hc->lock, flags);
1736 reset_hfcpci(hc);
1737 while (cnt) {
1738 inithfcpci(hc);
1739 /*
1740 * Finally enable IRQ output
1741 * this is only allowed, if an IRQ routine is already
1742 * established for this HFC, so don't do that earlier
1743 */
1744 enable_hwirq(hc);
		spin_unlock_irqrestore(&hc->lock, flags);
1746 /* Timeout 80ms */
1747 set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((80 * HZ) / 1000);
1749 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1750 hc->irq, hc->irqcnt);
1751 /* now switch timer interrupt off */
1752 spin_lock_irqsave(&hc->lock, flags);
1753 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1754 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1755 /* reinit mode reg */
1756 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1757 if (!hc->irqcnt) {
1758 printk(KERN_WARNING
1759 "HFC PCI: IRQ(%d) getting no interrupts "
1760 "during init %d\n", hc->irq, 4 - cnt);
1761 if (cnt == 1)
1762 break;
1763 else {
1764 reset_hfcpci(hc);
1765 cnt--;
1766 }
1767 } else {
			spin_unlock_irqrestore(&hc->lock, flags);
1769 hc->initdone = 1;
1770 return 0;
1771 }
1772 }
1773 disable_hwirq(hc);
	spin_unlock_irqrestore(&hc->lock, flags);
1775 free_irq(hc->irq, hc);
1776 return -EIO;
1777}
1778
1779static int
1780channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1781{
1782 int ret = 0;
1783 u_char slot;
1784
1785 switch (cq->op) {
1786 case MISDN_CTRL_GETOP:
1787 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1788 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1789 break;
1790 case MISDN_CTRL_LOOP:
		/* channel 0 disables the loop */
1792 if (cq->channel < 0 || cq->channel > 2) {
1793 ret = -EINVAL;
1794 break;
1795 }
1796 if (cq->channel & 1) {
1797 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1798 slot = 0xC0;
1799 else
1800 slot = 0x80;
1801 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1802 __func__, slot);
1803 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1804 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1805 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1806 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1807 }
1808 if (cq->channel & 2) {
1809 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1810 slot = 0xC1;
1811 else
1812 slot = 0x81;
1813 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1814 __func__, slot);
1815 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1816 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1817 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1818 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1819 }
1820 if (cq->channel & 3)
1821 hc->hw.trm |= 0x80; /* enable IOM-loop */
1822 else {
1823 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1824 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1825 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1826 }
1827 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1828 break;
1829 case MISDN_CTRL_CONNECT:
1830 if (cq->channel == cq->p1) {
1831 ret = -EINVAL;
1832 break;
1833 }
1834 if (cq->channel < 1 || cq->channel > 2 ||
1835 cq->p1 < 1 || cq->p1 > 2) {
1836 ret = -EINVAL;
1837 break;
1838 }
1839 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1840 slot = 0xC0;
1841 else
1842 slot = 0x80;
1843 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1844 __func__, slot);
1845 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1846 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1847 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1848 slot = 0xC1;
1849 else
1850 slot = 0x81;
1851 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1852 __func__, slot);
1853 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1854 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1855 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1856 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1857 hc->hw.trm |= 0x80;
1858 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1859 break;
1860 case MISDN_CTRL_DISCONNECT:
1861 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1862 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1863 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1864 break;
1865 case MISDN_CTRL_L1_TIMER3:
1866 ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1867 break;
1868 default:
1869 printk(KERN_WARNING "%s: unknown Op %x\n",
1870 __func__, cq->op);
1871 ret = -EINVAL;
1872 break;
1873 }
1874 return ret;
1875}

static int
open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
	      struct channel_req *rq)
{
	int err = 0;

	if (debug & DEBUG_HW_OPEN)
		printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
		       hc->dch.dev.id, __builtin_return_address(0));
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	if (rq->adr.channel == 1) {
		/* TODO: E-Channel */
		return -EINVAL;
	}
	if (!hc->initdone) {
		if (rq->protocol == ISDN_P_TE_S0) {
			err = create_l1(&hc->dch, hfc_l1callback);
			if (err)
				return err;
		}
		hc->hw.protocol = rq->protocol;
		ch->protocol = rq->protocol;
		err = init_card(hc);
		if (err)
			return err;
	} else {
		if (rq->protocol != ch->protocol) {
			if (hc->hw.protocol == ISDN_P_TE_S0)
				l1_event(hc->dch.l1, CLOSE_CHANNEL);
			if (rq->protocol == ISDN_P_TE_S0) {
				err = create_l1(&hc->dch, hfc_l1callback);
				if (err)
					return err;
			}
			hc->hw.protocol = rq->protocol;
			ch->protocol = rq->protocol;
			hfcpci_setmode(hc);
		}
	}

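	/*
	 * If layer 1 is already activated (S0 state F7 in TE mode, G3
	 * in NT mode), tell the opener immediately so it does not wait
	 * for an activation indication that will never come again.
	 */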
	if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
	    ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
	}
	rq->ch = ch;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: cannot get module\n", __func__);
	return 0;
}

static int
open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &hc->bch[rq->adr.channel - 1];
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch; /* TODO: E-channel */
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: cannot get module\n", __func__);
	return 0;
}

/*
 * device control function
 */
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct hfc_pci *hc = dch->hw;
	struct channel_req *rq;
	int err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n",
		       __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if ((rq->protocol == ISDN_P_TE_S0) ||
		    (rq->protocol == ISDN_P_NT_S0))
			err = open_dchannel(hc, ch, rq);
		else
			err = open_bchannel(hc, rq);
		break;
	case CLOSE_CHANNEL:
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
			       __func__, hc->dch.dev.id,
			       __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(hc, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: unknown command %x\n",
			       __func__, cmd);
		return -EINVAL;
	}
	return err;
}

static int
setup_hw(struct hfc_pci *hc)
{
	void *buffer;

	printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
	hc->hw.cirm = 0;
	hc->dch.state = 0;
	pci_set_master(hc->pdev);
	if (!hc->irq) {
		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
		return -EINVAL;
	}
	hc->hw.pci_io =
		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;

	if (!hc->hw.pci_io) {
		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
		return -ENOMEM;
	}
	/* Allocate memory for FIFOs */
	/* the memory needs to be on a 32k boundary within the first 4G */
	if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
		printk(KERN_WARNING
		       "HFC-PCI: No usable DMA configuration!\n");
		return -EIO;
	}
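	/*
	 * Note: the odd-looking mask mainly documents the controller's
	 * requirement (FIFO base 32 KiB aligned and below 4 GiB).  The
	 * 32 KiB coherent allocation below is naturally aligned to its
	 * size on common configurations, which is what the "silently
	 * assume" comment further down relies on.
	 */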
	buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
				    GFP_KERNEL);
	/* We silently assume the address is okay if nonzero */
	if (!buffer) {
		printk(KERN_WARNING
		       "HFC-PCI: Error allocating memory for FIFO!\n");
		return -ENOMEM;
	}
	hc->hw.fifos = buffer;
	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
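	/*
	 * Config-space dword 0x80 tells the controller where the shared
	 * FIFO memory lives; all of its bus-master FIFO accesses are
	 * made relative to this address.
	 */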
	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
	if (unlikely(!hc->hw.pci_io)) {
		printk(KERN_WARNING
		       "HFC-PCI: Error in ioremap for PCI!\n");
		dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
				  hc->hw.dmahandle);
		return -ENOMEM;
	}

	printk(KERN_INFO
	       "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
	       (u_long) hc->hw.pci_io, hc->hw.fifos,
	       &hc->hw.dmahandle, hc->irq, HZ);

	/* enable memory mapped ports, disable busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
	hc->hw.int_m2 = 0;
	disable_hwirq(hc);
	hc->hw.int_m1 = 0;
	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
	/* At this point the needed PCI config is done */
	/* fifos are still not enabled */
	timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
	/* default PCM master */
	test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
	return 0;
}

static void
release_card(struct hfc_pci *hc) {
	u_long	flags;

	spin_lock_irqsave(&hc->lock, flags);
	hc->hw.int_m2 = 0; /* interrupt output off ! */
	disable_hwirq(hc);
	mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
	mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
	if (hc->dch.timer.function != NULL) {
		del_timer(&hc->dch.timer);
		hc->dch.timer.function = NULL;
	}
	spin_unlock_irqrestore(&hc->lock, flags);
	if (hc->hw.protocol == ISDN_P_TE_S0)
		l1_event(hc->dch.l1, CLOSE_CHANNEL);
	if (hc->initdone)
		free_irq(hc->irq, hc);
	release_io_hfcpci(hc); /* must release after free_irq! */
	mISDN_unregister_device(&hc->dch.dev);
	mISDN_freebchannel(&hc->bch[1]);
	mISDN_freebchannel(&hc->bch[0]);
	mISDN_freedchannel(&hc->dch);
	pci_set_drvdata(hc->pdev, NULL);
	kfree(hc);
}

static int
setup_card(struct hfc_pci *card)
{
	int err = -EINVAL;
	u_int i;
	char name[MISDN_MAX_IDLEN];

	card->dch.debug = debug;
	spin_lock_init(&card->lock);
	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
	card->dch.hw = card;
	card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->dch.dev.D.send = hfcpci_l2l1D;
	card->dch.dev.D.ctrl = hfc_dctrl;
	card->dch.dev.nrbchan = 2;
	for (i = 0; i < 2; i++) {
		card->bch[i].nr = i + 1;
		set_channelmap(i + 1, card->dch.dev.channelmap);
		card->bch[i].debug = debug;
		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
		card->bch[i].hw = card;
		card->bch[i].ch.send = hfcpci_l2l1B;
		card->bch[i].ch.ctrl = hfc_bctrl;
		card->bch[i].ch.nr = i + 1;
		list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
	}
	err = setup_hw(card);
	if (err)
		goto error;
	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
	if (err)
		goto error;
	HFC_cnt++;
	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
	return 0;
error:
	mISDN_freebchannel(&card->bch[1]);
	mISDN_freebchannel(&card->bch[0]);
	mISDN_freedchannel(&card->dch);
	kfree(card);
	return err;
}

/* private data in the PCI devices list */
struct _hfc_map {
	u_int	subtype;
	u_int	flag;
	char	*name;
};

static const struct _hfc_map hfc_map[] =
{
	{HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
	{HFC_CCD_B000, 0, "Billion B000"},
	{HFC_CCD_B006, 0, "Billion B006"},
	{HFC_CCD_B007, 0, "Billion B007"},
	{HFC_CCD_B008, 0, "Billion B008"},
	{HFC_CCD_B009, 0, "Billion B009"},
	{HFC_CCD_B00A, 0, "Billion B00A"},
	{HFC_CCD_B00B, 0, "Billion B00B"},
	{HFC_CCD_B00C, 0, "Billion B00C"},
	{HFC_CCD_B100, 0, "Seyeon B100"},
	{HFC_CCD_B700, 0, "Primux II S0 B700"},
	{HFC_CCD_B701, 0, "Primux II S0 NT B701"},
	{HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
	{HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
	{HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
	{HFC_BERKOM_A1T, 0, "German telekom A1T"},
	{HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
	{HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
	{HFC_DIGI_DF_M_IOM2_E, 0,
	 "Digi International DataFire Micro V IOM2 (Europe)"},
	{HFC_DIGI_DF_M_E, 0,
	 "Digi International DataFire Micro V (Europe)"},
	{HFC_DIGI_DF_M_IOM2_A, 0,
	 "Digi International DataFire Micro V IOM2 (North America)"},
	{HFC_DIGI_DF_M_A, 0,
	 "Digi International DataFire Micro V (North America)"},
	{HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
	{},
};
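
/*
 * hfc_ids[] below stores a pointer into hfc_map[] as driver_data, so
 * the two tables must be kept in the same order: entry n of hfc_ids
 * refers to hfc_map[n].
 */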

static const struct pci_device_id hfc_ids[] =
{
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
	  (unsigned long) &hfc_map[0] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
	  (unsigned long) &hfc_map[1] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
	  (unsigned long) &hfc_map[2] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
	  (unsigned long) &hfc_map[3] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
	  (unsigned long) &hfc_map[4] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
	  (unsigned long) &hfc_map[5] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
	  (unsigned long) &hfc_map[6] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
	  (unsigned long) &hfc_map[7] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
	  (unsigned long) &hfc_map[8] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
	  (unsigned long) &hfc_map[9] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
	  (unsigned long) &hfc_map[10] },
	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
	  (unsigned long) &hfc_map[11] },
	{ PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
	  (unsigned long) &hfc_map[12] },
	{ PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
	  (unsigned long) &hfc_map[13] },
	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
	  (unsigned long) &hfc_map[14] },
	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
	  (unsigned long) &hfc_map[15] },
	{ PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
	  (unsigned long) &hfc_map[16] },
	{ PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
	  (unsigned long) &hfc_map[17] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
	  (unsigned long) &hfc_map[18] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
	  (unsigned long) &hfc_map[19] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
	  (unsigned long) &hfc_map[20] },
	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
	  (unsigned long) &hfc_map[21] },
	{ PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
	  (unsigned long) &hfc_map[22] },
	{},
};

static int
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	struct hfc_pci *card;
	struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
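	/*
	 * ent->driver_data carries the hfc_map[] entry stashed in
	 * hfc_ids[] above, giving the board subtype and a printable
	 * name for this PCI ID.
	 */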

	card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
	if (!card) {
		printk(KERN_ERR "No kmem for HFC card\n");
		return err;
	}
	card->pdev = pdev;
	card->subtype = m->subtype;
	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
	       m->name, pci_name(pdev));

	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_card(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}

static void
hfc_remove_pci(struct pci_dev *pdev)
{
	struct hfc_pci *card = pci_get_drvdata(pdev);

	if (card)
		release_card(card);
	else
		if (debug)
			printk(KERN_DEBUG "%s: drvdata already removed\n",
			       __func__);
}


static struct pci_driver hfc_driver = {
	.name = "hfcpci",
	.probe = hfc_probe,
	.remove = hfc_remove_pci,
	.id_table = hfc_ids,
};

static int
_hfcpci_softirq(struct device *dev, void *unused)
{
	struct hfc_pci *hc = dev_get_drvdata(dev);
	struct bchannel *bch;
	if (hc == NULL)
		return 0;

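	/*
	 * This runs from the global poll timer instead of the chip
	 * interrupt (used when the poll parameter is not the default
	 * HFCPCI_BTRANS_THRESHOLD of 128 samples): service the receive
	 * and transmit FIFOs of both B channels while they are in
	 * transparent (RAW) mode.
	 */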
	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
		spin_lock_irq(&hc->lock);
		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		spin_unlock_irq(&hc->lock);
	}
	return 0;
}

static void
hfcpci_softirq(struct timer_list *unused)
{
	WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
					    _hfcpci_softirq) != 0);

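	/*
	 * Re-arm relative to the previous deadline so the polling
	 * period stays constant on average; the signed difference keeps
	 * the comparison correct across a jiffies wraparound.
	 */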
	/* if next event would be in the past ... */
	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
		hfc_jiffies = jiffies + 1;
	else
		hfc_jiffies += tics;
	hfc_tl.expires = hfc_jiffies;
	add_timer(&hfc_tl);
}

static int __init
HFC_init(void)
{
	int err;

	if (!poll)
		poll = HFCPCI_BTRANS_THRESHOLD;

	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		tics = (poll * HZ) / 8000;
		if (tics < 1)
			tics = 1;
		poll = (tics * 8000) / HZ;
		if (poll > 256 || poll < 8) {
			printk(KERN_ERR "%s: poll value %d is out of the "
			       "valid range 8..256\n", __func__, poll);
			err = -EINVAL;
			return err;
		}
	}
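	/*
	 * The rounding above quantizes poll to whole kernel ticks:
	 * samples arrive at 8000 Hz, so one tick covers 8000/HZ
	 * samples.  Example (assuming HZ=1000): poll=100 gives
	 * tics = 100*1000/8000 = 12, which is fed back as
	 * poll = 12*8000/1000 = 96 samples, i.e. a multiple of 8.
	 */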
	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
		       __func__, poll);
		timer_setup(&hfc_tl, hfcpci_softirq, 0);
		hfc_tl.expires = jiffies + tics;
		hfc_jiffies = hfc_tl.expires;
		add_timer(&hfc_tl);
	} else
		tics = 0; /* indicate the use of controller's timer */

	err = pci_register_driver(&hfc_driver);
	if (err) {
		if (timer_pending(&hfc_tl))
			del_timer(&hfc_tl);
	}

	return err;
}

static void __exit
HFC_cleanup(void)
{
	del_timer_sync(&hfc_tl);

	pci_unregister_driver(&hfc_driver);
}

module_init(HFC_init);
module_exit(HFC_cleanup);

MODULE_DEVICE_TABLE(pci, hfc_ids);
