1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /****************************************************************************** |
3 | * |
4 | * (C)Copyright 1998,1999 SysKonnect, |
5 | * a business unit of Schneider & Koch & Co. Datensysteme GmbH. |
6 | * |
7 | * See the file "skfddi.c" for further information. |
8 | * |
9 | * The information in this file is provided "AS IS" without warranty. |
10 | * |
11 | ******************************************************************************/ |
12 | |
13 | #define HWMTM |
14 | |
15 | #ifndef FDDI |
16 | #define FDDI |
17 | #endif |
18 | |
19 | #include "h/types.h" |
20 | #include "h/fddi.h" |
21 | #include "h/smc.h" |
22 | #include "h/supern_2.h" |
23 | #include "h/skfbiinc.h" |
24 | |
25 | /* |
26 | ------------------------------------------------------------- |
27 | DOCUMENTATION |
28 | ------------------------------------------------------------- |
29 | BEGIN_MANUAL_ENTRY(DOCUMENTATION) |
30 | |
31 | T B D |
32 | |
33 | END_MANUAL_ENTRY |
34 | */ |
35 | /* |
36 | ------------------------------------------------------------- |
37 | LOCAL VARIABLES: |
38 | ------------------------------------------------------------- |
39 | */ |
40 | #ifdef COMMON_MB_POOL |
41 | static SMbuf *mb_start; |
42 | static SMbuf *mb_free; |
43 | static int mb_init = FALSE ; |
44 | static int call_count; |
45 | #endif |
46 | |
47 | /* |
48 | ------------------------------------------------------------- |
	EXTERNAL VARIABLES:
50 | ------------------------------------------------------------- |
51 | */ |
52 | |
53 | #ifdef DEBUG |
54 | #ifndef DEBUG_BRD |
55 | extern struct smt_debug debug ; |
56 | #endif |
57 | #endif |
58 | |
59 | #ifdef NDIS_OS2 |
60 | extern u_char offDepth ; |
61 | extern u_char force_irq_pending ; |
62 | #endif |
63 | |
64 | /* |
65 | ------------------------------------------------------------- |
66 | LOCAL FUNCTIONS: |
67 | ------------------------------------------------------------- |
68 | */ |
69 | |
70 | static void queue_llc_rx(struct s_smc *smc, SMbuf *mb); |
71 | static void smt_to_llc(struct s_smc *smc, SMbuf *mb); |
72 | static void init_txd_ring(struct s_smc *smc); |
73 | static void init_rxd_ring(struct s_smc *smc); |
74 | static void queue_txd_mb(struct s_smc *smc, SMbuf *mb); |
75 | static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start, |
76 | int count); |
77 | static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue); |
78 | static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue); |
79 | static SMbuf* get_llc_rx(struct s_smc *smc); |
80 | static SMbuf* get_txd_mb(struct s_smc *smc); |
81 | static void mac_drv_clear_txd(struct s_smc *smc); |
82 | |
83 | /* |
84 | ------------------------------------------------------------- |
85 | EXTERNAL FUNCTIONS: |
86 | ------------------------------------------------------------- |
87 | */ |
88 | /* The external SMT functions are listed in cmtdef.h */ |
89 | |
90 | extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size); |
91 | extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size); |
92 | extern void mac_drv_fill_rxd(struct s_smc *smc); |
93 | extern void mac_drv_tx_complete(struct s_smc *smc, |
94 | volatile struct s_smt_fp_txd *txd); |
95 | extern void mac_drv_rx_complete(struct s_smc *smc, |
96 | volatile struct s_smt_fp_rxd *rxd, |
97 | int frag_count, int len); |
98 | extern void mac_drv_requeue_rxd(struct s_smc *smc, |
99 | volatile struct s_smt_fp_rxd *rxd, |
100 | int frag_count); |
101 | extern void mac_drv_clear_rxd(struct s_smc *smc, |
102 | volatile struct s_smt_fp_rxd *rxd, int frag_count); |
103 | |
104 | #ifdef USE_OS_CPY |
105 | extern void hwm_cpy_rxd2mb(void); |
106 | extern void hwm_cpy_txd2mb(void); |
107 | #endif |
108 | |
109 | #ifdef ALL_RX_COMPLETE |
110 | extern void mac_drv_all_receives_complete(void); |
111 | #endif |
112 | |
113 | extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt); |
114 | extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag); |
115 | |
116 | #ifdef NDIS_OS2 |
117 | extern void post_proc(void); |
118 | #else |
119 | extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, |
120 | int flag); |
121 | #endif |
122 | |
123 | extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead, |
124 | int la_len); |
125 | |
126 | /* |
127 | ------------------------------------------------------------- |
128 | PUBLIC FUNCTIONS: |
129 | ------------------------------------------------------------- |
130 | */ |
131 | void process_receive(struct s_smc *smc); |
132 | void fddi_isr(struct s_smc *smc); |
133 | void smt_free_mbuf(struct s_smc *smc, SMbuf *mb); |
134 | void init_driver_fplus(struct s_smc *smc); |
135 | void mac_drv_rx_mode(struct s_smc *smc, int mode); |
136 | void init_fddi_driver(struct s_smc *smc, u_char *mac_addr); |
137 | void mac_drv_clear_tx_queue(struct s_smc *smc); |
138 | void mac_drv_clear_rx_queue(struct s_smc *smc); |
139 | void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, |
140 | int frame_status); |
141 | void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, |
142 | int frame_status); |
143 | |
144 | int mac_drv_init(struct s_smc *smc); |
145 | int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, |
146 | int frame_status); |
147 | |
148 | u_int mac_drv_check_space(void); |
149 | |
150 | SMbuf* smt_get_mbuf(struct s_smc *smc); |
151 | |
152 | #ifdef DEBUG |
153 | void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev); |
154 | #endif |
155 | |
156 | /* |
157 | ------------------------------------------------------------- |
158 | MACROS: |
159 | ------------------------------------------------------------- |
160 | */ |
161 | #ifndef UNUSED |
162 | #ifdef lint |
163 | #define UNUSED(x) (x) = (x) |
164 | #else |
165 | #define UNUSED(x) |
166 | #endif |
167 | #endif |
168 | |
169 | #ifdef USE_CAN_ADDR |
170 | #define MA smc->hw.fddi_canon_addr.a |
171 | #define GROUP_ADDR_BIT 0x01 |
172 | #else |
173 | #define MA smc->hw.fddi_home_addr.a |
174 | #define GROUP_ADDR_BIT 0x80 |
175 | #endif |
176 | |
177 | #define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\ |
178 | SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) |
179 | |
180 | #ifdef MB_OUTSIDE_SMC |
181 | #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\ |
182 | MAX_MBUF*sizeof(SMbuf)) |
183 | #define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)) |
184 | #else |
185 | #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)) |
186 | #endif |
187 | |
188 | /* |
189 | * define critical read for 16 Bit drivers |
190 | */ |
191 | #if defined(NDIS_OS2) || defined(ODI2) |
192 | #define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff)) |
193 | #else |
194 | #define CR_READ(var) (__le32)(var) |
195 | #endif |
196 | |
197 | #define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ |
198 | IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \ |
199 | IS_R1_C | IS_XA_C | IS_XS_C) |
200 | |
201 | /* |
202 | ------------------------------------------------------------- |
203 | INIT- AND SMT FUNCTIONS: |
204 | ------------------------------------------------------------- |
205 | */ |
206 | |
207 | |
208 | /* |
209 | * BEGIN_MANUAL_ENTRY(mac_drv_check_space) |
210 | * u_int mac_drv_check_space() |
211 | * |
212 | * function DOWNCALL (drvsr.c) |
213 | * This function calculates the needed non virtual |
214 | * memory for MBufs, RxD and TxD descriptors etc. |
215 | * needed by the driver. |
216 | * |
217 | * return u_int memory in bytes |
218 | * |
219 | * END_MANUAL_ENTRY |
220 | */ |
u_int mac_drv_check_space(void)
{
#ifdef	MB_OUTSIDE_SMC
#ifdef	COMMON_MB_POOL
	/* The MBuf pool is shared between adapters: only the first call
	 * must account for the pool itself, later calls only need the
	 * descriptor memory. */
	call_count++ ;
	if (call_count == 1) {
		return EXT_VIRT_MEM;
	}
	else {
		return EXT_VIRT_MEM_2;
	}
#else
	/* per-adapter pool: descriptors + MBufs every time */
	return EXT_VIRT_MEM;
#endif
#else
	return 0;	/* MBufs live inside the SMC struct: no extra memory */
#endif
}
239 | |
240 | /* |
241 | * BEGIN_MANUAL_ENTRY(mac_drv_init) |
242 | * void mac_drv_init(smc) |
243 | * |
244 | * function DOWNCALL (drvsr.c) |
 *	In this function the hardware module allocates its
 *	memory.
 *	The operating system dependent module should call
 *	mac_drv_init once, after the adapter is detected.
249 | * END_MANUAL_ENTRY |
250 | */ |
251 | int mac_drv_init(struct s_smc *smc) |
252 | { |
253 | if (sizeof(struct s_smt_fp_rxd) % 16) { |
254 | SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ; |
255 | } |
256 | if (sizeof(struct s_smt_fp_txd) % 16) { |
257 | SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ; |
258 | } |
259 | |
260 | /* |
261 | * get the required memory for the RxDs and TxDs |
262 | */ |
263 | if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *) |
264 | mac_drv_get_desc_mem(smc,size: (u_int) |
265 | (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) { |
266 | return 1; /* no space the hwm modul can't work */ |
267 | } |
268 | |
269 | /* |
270 | * get the memory for the SMT MBufs |
271 | */ |
272 | #ifndef MB_OUTSIDE_SMC |
273 | smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ; |
274 | #else |
275 | #ifndef COMMON_MB_POOL |
276 | if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc, |
277 | MAX_MBUF*sizeof(SMbuf)))) { |
278 | return 1; /* no space the hwm modul can't work */ |
279 | } |
280 | #else |
281 | if (!mb_start) { |
282 | if (!(mb_start = (SMbuf *) mac_drv_get_space(smc, |
283 | MAX_MBUF*sizeof(SMbuf)))) { |
284 | return 1; /* no space the hwm modul can't work */ |
285 | } |
286 | } |
287 | #endif |
288 | #endif |
289 | return 0; |
290 | } |
291 | |
292 | /* |
293 | * BEGIN_MANUAL_ENTRY(init_driver_fplus) |
294 | * init_driver_fplus(smc) |
295 | * |
296 | * Sets hardware modul specific values for the mode register 2 |
297 | * (e.g. the byte alignment for the received frames, the position of the |
298 | * least significant byte etc.) |
299 | * END_MANUAL_ENTRY |
300 | */ |
void init_driver_fplus(struct s_smc *smc)
{
	/* mode register 2 init value; the low bits (3) select the byte
	 * alignment of received frames (see manual entry above) */
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	/* PCI boards additionally enable parity checking/generation */
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
315 | |
316 | static u_long init_descr_ring(struct s_smc *smc, |
317 | union s_fp_descr volatile *start, |
318 | int count) |
319 | { |
320 | int i ; |
321 | union s_fp_descr volatile *d1 ; |
322 | union s_fp_descr volatile *d2 ; |
323 | u_long phys ; |
324 | |
325 | DB_GEN(3, "descr ring starts at = %p" , start); |
326 | for (i=count-1, d1=start; i ; i--) { |
327 | d2 = d1 ; |
328 | d1++ ; /* descr is owned by the host */ |
329 | d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; |
330 | d2->r.rxd_next = &d1->r ; |
331 | phys = mac_drv_virt2phys(smc,virt: (void *)d1) ; |
332 | d2->r.rxd_nrdadr = cpu_to_le32(phys) ; |
333 | } |
334 | DB_GEN(3, "descr ring ends at = %p" , d1); |
335 | d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; |
336 | d1->r.rxd_next = &start->r ; |
337 | phys = mac_drv_virt2phys(smc,virt: (void *)start) ; |
338 | d1->r.rxd_nrdadr = cpu_to_le32(phys) ; |
339 | |
340 | for (i=count, d1=start; i ; i--) { |
341 | DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; |
342 | d1++; |
343 | } |
344 | return phys; |
345 | } |
346 | |
347 | static void init_txd_ring(struct s_smc *smc) |
348 | { |
349 | struct s_smt_fp_txd volatile *ds ; |
350 | struct s_smt_tx_queue *queue ; |
351 | u_long phys ; |
352 | |
353 | /* |
354 | * initialize the transmit descriptors |
355 | */ |
356 | ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p + |
357 | SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ; |
358 | queue = smc->hw.fp.tx[QUEUE_A0] ; |
359 | DB_GEN(3, "Init async TxD ring, %d TxDs" , HWM_ASYNC_TXD_COUNT); |
360 | (void)init_descr_ring(smc,start: (union s_fp_descr volatile *)ds, |
361 | HWM_ASYNC_TXD_COUNT) ; |
362 | phys = le32_to_cpu(ds->txd_ntdadr) ; |
363 | ds++ ; |
364 | queue->tx_curr_put = queue->tx_curr_get = ds ; |
365 | ds-- ; |
366 | queue->tx_free = HWM_ASYNC_TXD_COUNT ; |
367 | queue->tx_used = 0 ; |
368 | outpd(ADDR(B5_XA_DA),phys) ; |
369 | |
370 | ds = (struct s_smt_fp_txd volatile *) ((char *)ds + |
371 | HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ; |
372 | queue = smc->hw.fp.tx[QUEUE_S] ; |
373 | DB_GEN(3, "Init sync TxD ring, %d TxDs" , HWM_SYNC_TXD_COUNT); |
374 | (void)init_descr_ring(smc,start: (union s_fp_descr volatile *)ds, |
375 | HWM_SYNC_TXD_COUNT) ; |
376 | phys = le32_to_cpu(ds->txd_ntdadr) ; |
377 | ds++ ; |
378 | queue->tx_curr_put = queue->tx_curr_get = ds ; |
379 | queue->tx_free = HWM_SYNC_TXD_COUNT ; |
380 | queue->tx_used = 0 ; |
381 | outpd(ADDR(B5_XS_DA),phys) ; |
382 | } |
383 | |
384 | static void init_rxd_ring(struct s_smc *smc) |
385 | { |
386 | struct s_smt_fp_rxd volatile *ds ; |
387 | struct s_smt_rx_queue *queue ; |
388 | u_long phys ; |
389 | |
390 | /* |
391 | * initialize the receive descriptors |
392 | */ |
393 | ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ; |
394 | queue = smc->hw.fp.rx[QUEUE_R1] ; |
395 | DB_GEN(3, "Init RxD ring, %d RxDs" , SMT_R1_RXD_COUNT); |
396 | (void)init_descr_ring(smc,start: (union s_fp_descr volatile *)ds, |
397 | SMT_R1_RXD_COUNT) ; |
398 | phys = le32_to_cpu(ds->rxd_nrdadr) ; |
399 | ds++ ; |
400 | queue->rx_curr_put = queue->rx_curr_get = ds ; |
401 | queue->rx_free = SMT_R1_RXD_COUNT ; |
402 | queue->rx_used = 0 ; |
403 | outpd(ADDR(B4_R1_DA),phys) ; |
404 | } |
405 | |
406 | /* |
407 | * BEGIN_MANUAL_ENTRY(init_fddi_driver) |
408 | * void init_fddi_driver(smc,mac_addr) |
409 | * |
410 | * initializes the driver and it's variables |
411 | * |
412 | * END_MANUAL_ENTRY |
413 | */ |
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		/* use_count of 1 makes smt_free_mbuf() chain the buffer
		 * into the free list */
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	/* shared pool: initialize it only once, for the first adapter */
	mb = mb_start ;
	if (!mb_init) {
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the other variables
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start pointer is 16 byte aligned
	 * (mac_drv_init allocated one spare descriptor for this slack)
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN(3, "i = %d", i);
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

	/* build the descriptor rings, hand buffers to the rx ring,
	 * then bring up the PLC */
	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}
476 | |
477 | |
478 | SMbuf *smt_get_mbuf(struct s_smc *smc) |
479 | { |
480 | register SMbuf *mb ; |
481 | |
482 | #ifndef COMMON_MB_POOL |
483 | mb = smc->os.hwm.mbuf_pool.mb_free ; |
484 | #else |
485 | mb = mb_free ; |
486 | #endif |
487 | if (mb) { |
488 | #ifndef COMMON_MB_POOL |
489 | smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ; |
490 | #else |
491 | mb_free = mb->sm_next ; |
492 | #endif |
493 | mb->sm_off = 8 ; |
494 | mb->sm_use_count = 1 ; |
495 | } |
496 | DB_GEN(3, "get SMbuf: mb = %p" , mb); |
497 | return mb; /* May be NULL */ |
498 | } |
499 | |
500 | void smt_free_mbuf(struct s_smc *smc, SMbuf *mb) |
501 | { |
502 | |
503 | if (mb) { |
504 | mb->sm_use_count-- ; |
505 | DB_GEN(3, "free_mbuf: sm_use_count = %d" , mb->sm_use_count); |
506 | /* |
507 | * If the use_count is != zero the MBuf is queued |
508 | * more than once and must not queued into the |
509 | * free MBuf queue |
510 | */ |
511 | if (!mb->sm_use_count) { |
512 | DB_GEN(3, "free SMbuf: mb = %p" , mb); |
513 | #ifndef COMMON_MB_POOL |
514 | mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ; |
515 | smc->os.hwm.mbuf_pool.mb_free = mb ; |
516 | #else |
517 | mb->sm_next = mb_free ; |
518 | mb_free = mb ; |
519 | #endif |
520 | } |
521 | } |
522 | else |
523 | SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; |
524 | } |
525 | |
526 | |
527 | /* |
528 | * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr) |
529 | * void mac_drv_repair_descr(smc) |
530 | * |
531 | * function called from SMT (HWM / hwmtm.c) |
532 | * The BMU is idle when this function is called. |
533 | * Mac_drv_repair_descr sets up the physical address |
534 | * for all receive and transmit queues where the BMU |
535 | * should continue. |
 *	It may be that the BMU was reset during a fragmented
 *	transfer. In this case there are some fragments which will
 *	never be completed by the BMU. The OWN bit of these fragments
 *	must be switched to be owned by the host.
540 | * |
541 | * Give a start command to the receive BMU. |
542 | * Start the transmit BMUs if transmit frames pending. |
543 | * |
544 | * END_MANUAL_ENTRY |
545 | */ |
546 | void mac_drv_repair_descr(struct s_smc *smc) |
547 | { |
548 | u_long phys ; |
549 | |
550 | if (smc->hw.hw_state != STOPPED) { |
551 | SK_BREAK() ; |
552 | SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ; |
553 | return ; |
554 | } |
555 | |
556 | /* |
557 | * repair tx queues: don't start |
558 | */ |
559 | phys = repair_txd_ring(smc,queue: smc->hw.fp.tx[QUEUE_A0]) ; |
560 | outpd(ADDR(B5_XA_DA),phys) ; |
561 | if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) { |
562 | outpd(ADDR(B0_XA_CSR),CSR_START) ; |
563 | } |
564 | phys = repair_txd_ring(smc,queue: smc->hw.fp.tx[QUEUE_S]) ; |
565 | outpd(ADDR(B5_XS_DA),phys) ; |
566 | if (smc->hw.fp.tx_q[QUEUE_S].tx_used) { |
567 | outpd(ADDR(B0_XS_CSR),CSR_START) ; |
568 | } |
569 | |
570 | /* |
571 | * repair rx queues |
572 | */ |
573 | phys = repair_rxd_ring(smc,queue: smc->hw.fp.rx[QUEUE_R1]) ; |
574 | outpd(ADDR(B4_R1_DA),phys) ; |
575 | outpd(ADDR(B0_R1_CSR),CSR_START) ; |
576 | } |
577 | |
578 | static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue) |
579 | { |
580 | int i ; |
581 | int tx_used ; |
582 | u_long phys ; |
583 | u_long tbctrl ; |
584 | struct s_smt_fp_txd volatile *t ; |
585 | |
586 | SK_UNUSED(smc) ; |
587 | |
588 | t = queue->tx_curr_get ; |
589 | tx_used = queue->tx_used ; |
590 | for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { |
591 | t = t->txd_next ; |
592 | } |
593 | phys = le32_to_cpu(t->txd_ntdadr) ; |
594 | |
595 | t = queue->tx_curr_get ; |
596 | while (tx_used) { |
597 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; |
598 | tbctrl = le32_to_cpu(t->txd_tbctrl) ; |
599 | |
600 | if (tbctrl & BMU_OWN) { |
601 | if (tbctrl & BMU_STF) { |
602 | break ; /* exit the loop */ |
603 | } |
604 | else { |
605 | /* |
606 | * repair the descriptor |
607 | */ |
608 | t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; |
609 | } |
610 | } |
611 | phys = le32_to_cpu(t->txd_ntdadr) ; |
612 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
613 | t = t->txd_next ; |
614 | tx_used-- ; |
615 | } |
616 | return phys; |
617 | } |
618 | |
619 | /* |
620 | * Repairs the receive descriptor ring and returns the physical address |
621 | * where the BMU should continue working. |
622 | * |
623 | * o The physical address where the BMU was stopped has to be |
624 | * determined. This is the next RxD after rx_curr_get with an OWN |
625 | * bit set. |
626 | * o The BMU should start working at beginning of the next frame. |
627 | * RxDs with an OWN bit set but with a reset STF bit should be |
628 | * skipped and owned by the driver (OWN = 0). |
629 | */ |
630 | static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue) |
631 | { |
632 | int i ; |
633 | int rx_used ; |
634 | u_long phys ; |
635 | u_long rbctrl ; |
636 | struct s_smt_fp_rxd volatile *r ; |
637 | |
638 | SK_UNUSED(smc) ; |
639 | |
640 | r = queue->rx_curr_get ; |
641 | rx_used = queue->rx_used ; |
642 | for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) { |
643 | r = r->rxd_next ; |
644 | } |
645 | phys = le32_to_cpu(r->rxd_nrdadr) ; |
646 | |
647 | r = queue->rx_curr_get ; |
648 | while (rx_used) { |
649 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
650 | rbctrl = le32_to_cpu(r->rxd_rbctrl) ; |
651 | |
652 | if (rbctrl & BMU_OWN) { |
653 | if (rbctrl & BMU_STF) { |
654 | break ; /* exit the loop */ |
655 | } |
656 | else { |
657 | /* |
658 | * repair the descriptor |
659 | */ |
660 | r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; |
661 | } |
662 | } |
663 | phys = le32_to_cpu(r->rxd_nrdadr) ; |
664 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; |
665 | r = r->rxd_next ; |
666 | rx_used-- ; |
667 | } |
668 | return phys; |
669 | } |
670 | |
671 | |
672 | /* |
673 | ------------------------------------------------------------- |
674 | INTERRUPT SERVICE ROUTINE: |
675 | ------------------------------------------------------------- |
676 | */ |
677 | |
678 | /* |
679 | * BEGIN_MANUAL_ENTRY(fddi_isr) |
680 | * void fddi_isr(smc) |
681 | * |
682 | * function DOWNCALL (drvsr.c) |
683 | * interrupt service routine, handles the interrupt requests |
684 | * generated by the FDDI adapter. |
685 | * |
686 | * NOTE: The operating system dependent module must guarantee that the |
687 | * interrupts of the adapter are disabled when it calls fddi_isr. |
688 | * |
 *	About the USE_BREAK_ISR mechanism:
 *
 *	The main requirement of this mechanism is to force a timer IRQ when
 *	leaving process_receive() with leave_isr set. process_receive() may
 *	be called at any time from anywhere!
 *	To be sure we don't miss such an event we set 'force_irq' by default.
 *	We have to force a timer IRQ if 'smc->os.hwm.leave_isr' AND
 *	'force_irq' are set. 'force_irq' may be reset if a receive complete
697 | * IRQ is pending. |
698 | * |
699 | * END_MANUAL_ENTRY |
700 | */ |
701 | void fddi_isr(struct s_smc *smc) |
702 | { |
703 | u_long is ; /* ISR source */ |
704 | u_short stu, stl ; |
705 | SMbuf *mb ; |
706 | |
707 | #ifdef USE_BREAK_ISR |
708 | int force_irq ; |
709 | #endif |
710 | |
711 | #ifdef ODI2 |
712 | if (smc->os.hwm.rx_break) { |
713 | mac_drv_fill_rxd(smc) ; |
714 | if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) { |
715 | smc->os.hwm.rx_break = 0 ; |
716 | process_receive(smc) ; |
717 | } |
718 | else { |
719 | smc->os.hwm.detec_count = 0 ; |
720 | smt_force_irq(smc) ; |
721 | } |
722 | } |
723 | #endif |
724 | smc->os.hwm.isr_flag = TRUE ; |
725 | |
726 | #ifdef USE_BREAK_ISR |
727 | force_irq = TRUE ; |
728 | if (smc->os.hwm.leave_isr) { |
729 | smc->os.hwm.leave_isr = FALSE ; |
730 | process_receive(smc) ; |
731 | } |
732 | #endif |
733 | |
734 | while ((is = GET_ISR() & ISR_MASK)) { |
735 | NDD_TRACE("CH0B" ,is,0,0) ; |
736 | DB_GEN(7, "ISA = 0x%lx" , is); |
737 | |
738 | if (is & IMASK_SLOW) { |
739 | NDD_TRACE("CH1b" ,is,0,0) ; |
740 | if (is & IS_PLINT1) { /* PLC1 */ |
741 | plc1_irq(smc) ; |
742 | } |
743 | if (is & IS_PLINT2) { /* PLC2 */ |
744 | plc2_irq(smc) ; |
745 | } |
746 | if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */ |
747 | stu = inpw(FM_A(FM_ST1U)) ; |
748 | stl = inpw(FM_A(FM_ST1L)) ; |
749 | DB_GEN(6, "Slow transmit complete" ); |
750 | mac1_irq(smc,stu,stl) ; |
751 | } |
752 | if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */ |
753 | stu= inpw(FM_A(FM_ST2U)) ; |
754 | stl= inpw(FM_A(FM_ST2L)) ; |
755 | DB_GEN(6, "Slow receive complete" ); |
756 | DB_GEN(7, "stl = %x : stu = %x" , stl, stu); |
757 | mac2_irq(smc,code_s2u: stu,code_s2l: stl) ; |
758 | } |
759 | if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */ |
760 | stu= inpw(FM_A(FM_ST3U)) ; |
761 | stl= inpw(FM_A(FM_ST3L)) ; |
762 | DB_GEN(6, "FORMAC Mode Register 3" ); |
763 | mac3_irq(smc,code_s3u: stu,code_s3l: stl) ; |
764 | } |
765 | if (is & IS_TIMINT) { /* Timer 82C54-2 */ |
766 | timer_irq(smc) ; |
767 | #ifdef NDIS_OS2 |
768 | force_irq_pending = 0 ; |
769 | #endif |
770 | /* |
771 | * out of RxD detection |
772 | */ |
773 | if (++smc->os.hwm.detec_count > 4) { |
774 | /* |
775 | * check out of RxD condition |
776 | */ |
777 | process_receive(smc) ; |
778 | } |
779 | } |
780 | if (is & IS_TOKEN) { /* Restricted Token Monitor */ |
781 | rtm_irq(smc) ; |
782 | } |
783 | if (is & IS_R1_P) { /* Parity error rx queue 1 */ |
784 | /* clear IRQ */ |
785 | outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ; |
786 | SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; |
787 | } |
788 | if (is & IS_R1_C) { /* Encoding error rx queue 1 */ |
789 | /* clear IRQ */ |
790 | outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ; |
791 | SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ; |
792 | } |
793 | if (is & IS_XA_C) { /* Encoding error async tx q */ |
794 | /* clear IRQ */ |
795 | outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ; |
796 | SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ; |
797 | } |
798 | if (is & IS_XS_C) { /* Encoding error sync tx q */ |
799 | /* clear IRQ */ |
800 | outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ; |
801 | SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ; |
802 | } |
803 | } |
804 | |
805 | /* |
806 | * Fast Tx complete Async/Sync Queue (BMU service) |
807 | */ |
808 | if (is & (IS_XS_F|IS_XA_F)) { |
809 | DB_GEN(6, "Fast tx complete queue" ); |
810 | /* |
811 | * clear IRQ, Note: no IRQ is lost, because |
812 | * we always service both queues |
813 | */ |
814 | outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ; |
815 | outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ; |
816 | mac_drv_clear_txd(smc) ; |
817 | llc_restart_tx(smc) ; |
818 | } |
819 | |
820 | /* |
821 | * Fast Rx Complete (BMU service) |
822 | */ |
823 | if (is & IS_R1_F) { |
824 | DB_GEN(6, "Fast receive complete" ); |
825 | /* clear IRQ */ |
826 | #ifndef USE_BREAK_ISR |
827 | outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; |
828 | process_receive(smc) ; |
829 | #else |
830 | process_receive(smc) ; |
831 | if (smc->os.hwm.leave_isr) { |
832 | force_irq = FALSE ; |
833 | } else { |
834 | outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; |
835 | process_receive(smc) ; |
836 | } |
837 | #endif |
838 | } |
839 | |
840 | #ifndef NDIS_OS2 |
841 | while ((mb = get_llc_rx(smc))) { |
842 | smt_to_llc(smc,mb) ; |
843 | } |
844 | #else |
845 | if (offDepth) |
846 | post_proc() ; |
847 | |
848 | while (!offDepth && (mb = get_llc_rx(smc))) { |
849 | smt_to_llc(smc,mb) ; |
850 | } |
851 | |
852 | if (!offDepth && smc->os.hwm.rx_break) { |
853 | process_receive(smc) ; |
854 | } |
855 | #endif |
856 | if (smc->q.ev_get != smc->q.ev_put) { |
857 | NDD_TRACE("CH2a" ,0,0,0) ; |
858 | ev_dispatcher(smc) ; |
859 | } |
860 | #ifdef NDIS_OS2 |
861 | post_proc() ; |
862 | if (offDepth) { /* leave fddi_isr because */ |
863 | break ; /* indications not allowed */ |
864 | } |
865 | #endif |
866 | #ifdef USE_BREAK_ISR |
867 | if (smc->os.hwm.leave_isr) { |
868 | break ; /* leave fddi_isr */ |
869 | } |
870 | #endif |
871 | |
872 | /* NOTE: when the isr is left, no rx is pending */ |
873 | } /* end of interrupt source polling loop */ |
874 | |
875 | #ifdef USE_BREAK_ISR |
876 | if (smc->os.hwm.leave_isr && force_irq) { |
877 | smt_force_irq(smc) ; |
878 | } |
879 | #endif |
880 | smc->os.hwm.isr_flag = FALSE ; |
881 | NDD_TRACE("CH0E" ,0,0,0) ; |
882 | } |
883 | |
884 | |
885 | /* |
886 | ------------------------------------------------------------- |
887 | RECEIVE FUNCTIONS: |
888 | ------------------------------------------------------------- |
889 | */ |
890 | |
891 | #ifndef NDIS_OS2 |
892 | /* |
893 | * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode) |
894 | * void mac_drv_rx_mode(smc,mode) |
895 | * |
896 | * function DOWNCALL (fplus.c) |
897 | * Corresponding to the parameter mode, the operating system |
898 | * dependent module can activate several receive modes. |
899 | * |
900 | * para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts |
901 | * = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts" |
902 | * = 3: RX_ENABLE_PROMISC enable promiscuous |
903 | * = 4: RX_DISABLE_PROMISC disable promiscuous |
904 | * = 5: RX_ENABLE_NSA enable rec. of all NSA frames |
905 | * (disabled after 'driver reset' & 'set station address') |
906 | * = 6: RX_DISABLE_NSA disable rec. of all NSA frames |
907 | * |
908 | * = 21: RX_ENABLE_PASS_SMT ( see description ) |
909 | * = 22: RX_DISABLE_PASS_SMT ( " " ) |
910 | * = 23: RX_ENABLE_PASS_NSA ( " " ) |
911 | * = 24: RX_DISABLE_PASS_NSA ( " " ) |
912 | * = 25: RX_ENABLE_PASS_DB ( " " ) |
913 | * = 26: RX_DISABLE_PASS_DB ( " " ) |
914 | * = 27: RX_DISABLE_PASS_ALL ( " " ) |
915 | * = 28: RX_DISABLE_LLC_PROMISC ( " " ) |
916 | * = 29: RX_ENABLE_LLC_PROMISC ( " " ) |
917 | * |
918 | * |
919 | * RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT |
920 | * |
921 | * If the operating system dependent module activates the |
922 | * mode RX_ENABLE_PASS_SMT, the hardware module |
923 | * duplicates all SMT frames with the frame control |
924 | * FC_SMT_INFO and passes them to the LLC receive channel |
925 | * by calling mac_drv_rx_init. |
926 | * The SMT Frames which are sent by the local SMT and the NSA |
927 | * frames whose A- and C-Indicator is not set are also duplicated |
928 | * and passed. |
929 | * The receive mode RX_DISABLE_PASS_SMT disables the passing |
930 | * of SMT frames. |
931 | * |
932 | * RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA |
933 | * |
934 | * If the operating system dependent module activates the |
935 | * mode RX_ENABLE_PASS_NSA, the hardware module |
936 | * duplicates all NSA frames with frame control FC_SMT_NSA |
937 | * and a set A-Indicator and passed them to the LLC |
938 | * receive channel by calling mac_drv_rx_init. |
939 | * All NSA Frames which are sent by the local SMT |
940 | * are also duplicated and passed. |
941 | * The receive mode RX_DISABLE_PASS_NSA disables the passing |
942 | * of NSA frames with the A- or C-Indicator set. |
943 | * |
944 | * NOTE: For fear that the hardware module receives NSA frames with |
945 | * a reset A-Indicator, the operating system dependent module |
946 | * has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA |
947 | * before activate the RX_ENABLE_PASS_NSA mode and after every |
948 | * 'driver reset' and 'set station address'. |
949 | * |
950 | * RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB |
951 | * |
952 | * If the operating system dependent module activates the |
953 | * mode RX_ENABLE_PASS_DB, direct BEACON frames |
954 | * (FC_BEACON frame control) are passed to the LLC receive |
955 | * channel by mac_drv_rx_init. |
956 | * The receive mode RX_DISABLE_PASS_DB disables the passing |
957 | * of direct BEACON frames. |
958 | * |
959 | * RX_DISABLE_PASS_ALL |
960 | * |
961 | * Disables all special receives modes. It is equal to |
962 | * call mac_drv_set_rx_mode successively with the |
963 | * parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT, |
964 | * RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB. |
965 | * |
966 | * RX_ENABLE_LLC_PROMISC |
967 | * |
968 | * (default) all received LLC frames and all SMT/NSA/DBEACON |
969 | * frames depending on the attitude of the flags |
970 | * PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the |
971 | * LLC layer |
972 | * |
973 | * RX_DISABLE_LLC_PROMISC |
974 | * |
975 | * all received SMT/NSA/DBEACON frames depending on the |
976 | * attitude of the flags PASS_SMT/PASS_NSA/PASS_DBEACON |
977 | * will be delivered to the LLC layer. |
978 | * all received LLC frames with a directed address, Multicast |
979 | * or Broadcast address will be delivered to the LLC |
980 | * layer too. |
981 | * |
982 | * END_MANUAL_ENTRY |
983 | */ |
984 | void mac_drv_rx_mode(struct s_smc *smc, int mode) |
985 | { |
986 | switch(mode) { |
987 | case RX_ENABLE_PASS_SMT: |
988 | smc->os.hwm.pass_SMT = TRUE ; |
989 | break ; |
990 | case RX_DISABLE_PASS_SMT: |
991 | smc->os.hwm.pass_SMT = FALSE ; |
992 | break ; |
993 | case RX_ENABLE_PASS_NSA: |
994 | smc->os.hwm.pass_NSA = TRUE ; |
995 | break ; |
996 | case RX_DISABLE_PASS_NSA: |
997 | smc->os.hwm.pass_NSA = FALSE ; |
998 | break ; |
999 | case RX_ENABLE_PASS_DB: |
1000 | smc->os.hwm.pass_DB = TRUE ; |
1001 | break ; |
1002 | case RX_DISABLE_PASS_DB: |
1003 | smc->os.hwm.pass_DB = FALSE ; |
1004 | break ; |
1005 | case RX_DISABLE_PASS_ALL: |
1006 | smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ; |
1007 | smc->os.hwm.pass_DB = FALSE ; |
1008 | smc->os.hwm.pass_llc_promisc = TRUE ; |
1009 | mac_set_rx_mode(smc,RX_DISABLE_NSA) ; |
1010 | break ; |
1011 | case RX_DISABLE_LLC_PROMISC: |
1012 | smc->os.hwm.pass_llc_promisc = FALSE ; |
1013 | break ; |
1014 | case RX_ENABLE_LLC_PROMISC: |
1015 | smc->os.hwm.pass_llc_promisc = TRUE ; |
1016 | break ; |
1017 | case RX_ENABLE_ALLMULTI: |
1018 | case RX_DISABLE_ALLMULTI: |
1019 | case RX_ENABLE_PROMISC: |
1020 | case RX_DISABLE_PROMISC: |
1021 | case RX_ENABLE_NSA: |
1022 | case RX_DISABLE_NSA: |
1023 | default: |
1024 | mac_set_rx_mode(smc,mode) ; |
1025 | break ; |
1026 | } |
1027 | } |
1028 | #endif /* ifndef NDIS_OS2 */ |
1029 | |
1030 | /* |
1031 | * process receive queue |
1032 | */ |
1033 | void process_receive(struct s_smc *smc) |
1034 | { |
1035 | int i ; |
1036 | int n ; |
1037 | int frag_count ; /* number of RxDs of the curr rx buf */ |
1038 | int used_frags ; /* number of RxDs of the curr frame */ |
1039 | struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */ |
1040 | struct s_smt_fp_rxd volatile *r ; /* rxd pointer */ |
1041 | struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */ |
1042 | u_long rbctrl ; /* receive buffer control word */ |
1043 | u_long rfsw ; /* receive frame status word */ |
1044 | u_short rx_used ; |
1045 | u_char far *virt ; |
1046 | char far *data ; |
1047 | SMbuf *mb ; |
1048 | u_char fc ; /* Frame control */ |
1049 | int len ; /* Frame length */ |
1050 | |
1051 | smc->os.hwm.detec_count = 0 ; |
1052 | queue = smc->hw.fp.rx[QUEUE_R1] ; |
1053 | NDD_TRACE("RHxB" ,0,0,0) ; |
1054 | for ( ; ; ) { |
1055 | r = queue->rx_curr_get ; |
1056 | rx_used = queue->rx_used ; |
1057 | frag_count = 0 ; |
1058 | |
1059 | #ifdef USE_BREAK_ISR |
1060 | if (smc->os.hwm.leave_isr) { |
1061 | goto rx_end ; |
1062 | } |
1063 | #endif |
1064 | #ifdef NDIS_OS2 |
1065 | if (offDepth) { |
1066 | smc->os.hwm.rx_break = 1 ; |
1067 | goto rx_end ; |
1068 | } |
1069 | smc->os.hwm.rx_break = 0 ; |
1070 | #endif |
1071 | #ifdef ODI2 |
1072 | if (smc->os.hwm.rx_break) { |
1073 | goto rx_end ; |
1074 | } |
1075 | #endif |
1076 | n = 0 ; |
1077 | do { |
1078 | DB_RX(5, "Check RxD %p for OWN and EOF" , r); |
1079 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1080 | rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl)); |
1081 | |
1082 | if (rbctrl & BMU_OWN) { |
1083 | NDD_TRACE("RHxE" ,r,rfsw,rbctrl) ; |
1084 | DB_RX(4, "End of RxDs" ); |
1085 | goto rx_end ; |
1086 | } |
1087 | /* |
1088 | * out of RxD detection |
1089 | */ |
1090 | if (!rx_used) { |
1091 | SK_BREAK() ; |
1092 | SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ; |
1093 | /* Either we don't have an RxD or all |
1094 | * RxDs are filled. Therefore it's allowed |
1095 | * for to set the STOPPED flag */ |
1096 | smc->hw.hw_state = STOPPED ; |
1097 | mac_drv_clear_rx_queue(smc) ; |
1098 | smc->hw.hw_state = STARTED ; |
1099 | mac_drv_fill_rxd(smc) ; |
1100 | smc->os.hwm.detec_count = 0 ; |
1101 | goto rx_end ; |
1102 | } |
1103 | rfsw = le32_to_cpu(r->rxd_rfsw) ; |
1104 | if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) { |
1105 | /* |
1106 | * The BMU_STF bit is deleted, 1 frame is |
1107 | * placed into more than 1 rx buffer |
1108 | * |
1109 | * skip frame by setting the rx len to 0 |
1110 | * |
1111 | * if fragment count == 0 |
1112 | * The missing STF bit belongs to the |
1113 | * current frame, search for the |
1114 | * EOF bit to complete the frame |
1115 | * else |
1116 | * the fragment belongs to the next frame, |
1117 | * exit the loop and process the frame |
1118 | */ |
1119 | SK_BREAK() ; |
1120 | rfsw = 0 ; |
1121 | if (frag_count) { |
1122 | break ; |
1123 | } |
1124 | } |
1125 | n += rbctrl & 0xffff ; |
1126 | r = r->rxd_next ; |
1127 | frag_count++ ; |
1128 | rx_used-- ; |
1129 | } while (!(rbctrl & BMU_EOF)) ; |
1130 | used_frags = frag_count ; |
1131 | DB_RX(5, "EOF set in RxD, used_frags = %d" , used_frags); |
1132 | |
1133 | /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ |
1134 | /* BMU_ST_BUF will not be changed by the ASIC */ |
1135 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1136 | while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { |
1137 | DB_RX(5, "Check STF bit in %p" , r); |
1138 | r = r->rxd_next ; |
1139 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1140 | frag_count++ ; |
1141 | rx_used-- ; |
1142 | } |
1143 | DB_RX(5, "STF bit found" ); |
1144 | |
1145 | /* |
1146 | * The received frame is finished for the process receive |
1147 | */ |
1148 | rxd = queue->rx_curr_get ; |
1149 | queue->rx_curr_get = r ; |
1150 | queue->rx_free += frag_count ; |
1151 | queue->rx_used = rx_used ; |
1152 | |
1153 | /* |
1154 | * ASIC Errata no. 7 (STF - Bit Bug) |
1155 | */ |
1156 | rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ; |
1157 | |
1158 | for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ |
1159 | DB_RX(5, "dma_complete for RxD %p" , r); |
1160 | dma_complete(smc,descr: (union s_fp_descr volatile *)r,DMA_WR); |
1161 | } |
1162 | smc->hw.fp.err_stats.err_valid++ ; |
1163 | smc->mib.m[MAC0].fddiMACCopied_Ct++ ; |
1164 | |
1165 | /* the length of the data including the FC */ |
1166 | len = (rfsw & RD_LENGTH) - 4 ; |
1167 | |
1168 | DB_RX(4, "frame length = %d" , len); |
1169 | /* |
1170 | * check the frame_length and all error flags |
1171 | */ |
1172 | if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){ |
1173 | if (rfsw & RD_S_MSRABT) { |
1174 | DB_RX(2, "Frame aborted by the FORMAC" ); |
1175 | smc->hw.fp.err_stats.err_abort++ ; |
1176 | } |
1177 | /* |
1178 | * check frame status |
1179 | */ |
1180 | if (rfsw & RD_S_SEAC2) { |
1181 | DB_RX(2, "E-Indicator set" ); |
1182 | smc->hw.fp.err_stats.err_e_indicator++ ; |
1183 | } |
1184 | if (rfsw & RD_S_SFRMERR) { |
1185 | DB_RX(2, "CRC error" ); |
1186 | smc->hw.fp.err_stats.err_crc++ ; |
1187 | } |
1188 | if (rfsw & RX_FS_IMPL) { |
1189 | DB_RX(2, "Implementer frame" ); |
1190 | smc->hw.fp.err_stats.err_imp_frame++ ; |
1191 | } |
1192 | goto abort_frame ; |
1193 | } |
1194 | if (len > FDDI_RAW_MTU-4) { |
1195 | DB_RX(2, "Frame too long error" ); |
1196 | smc->hw.fp.err_stats.err_too_long++ ; |
1197 | goto abort_frame ; |
1198 | } |
1199 | /* |
1200 | * SUPERNET 3 Bug: FORMAC delivers status words |
1201 | * of aborted frames to the BMU |
1202 | */ |
1203 | if (len <= 4) { |
1204 | DB_RX(2, "Frame length = 0" ); |
1205 | goto abort_frame ; |
1206 | } |
1207 | |
1208 | if (len != (n-4)) { |
1209 | DB_RX(4, "BMU: rx len differs: [%d:%d]" , len, n); |
1210 | smc->os.hwm.rx_len_error++ ; |
1211 | goto abort_frame ; |
1212 | } |
1213 | |
1214 | /* |
1215 | * Check SA == MA |
1216 | */ |
1217 | virt = (u_char far *) rxd->rxd_virt ; |
1218 | DB_RX(2, "FC = %x" , *virt); |
1219 | if (virt[12] == MA[5] && |
1220 | virt[11] == MA[4] && |
1221 | virt[10] == MA[3] && |
1222 | virt[9] == MA[2] && |
1223 | virt[8] == MA[1] && |
1224 | (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) { |
1225 | goto abort_frame ; |
1226 | } |
1227 | |
1228 | /* |
1229 | * test if LLC frame |
1230 | */ |
1231 | if (rfsw & RX_FS_LLC) { |
1232 | /* |
1233 | * if pass_llc_promisc is disable |
1234 | * if DA != Multicast or Broadcast or DA!=MA |
1235 | * abort the frame |
1236 | */ |
1237 | if (!smc->os.hwm.pass_llc_promisc) { |
1238 | if(!(virt[1] & GROUP_ADDR_BIT)) { |
1239 | if (virt[6] != MA[5] || |
1240 | virt[5] != MA[4] || |
1241 | virt[4] != MA[3] || |
1242 | virt[3] != MA[2] || |
1243 | virt[2] != MA[1] || |
1244 | virt[1] != MA[0]) { |
1245 | DB_RX(2, "DA != MA and not multi- or broadcast" ); |
1246 | goto abort_frame ; |
1247 | } |
1248 | } |
1249 | } |
1250 | |
1251 | /* |
1252 | * LLC frame received |
1253 | */ |
1254 | DB_RX(4, "LLC - receive" ); |
1255 | mac_drv_rx_complete(smc,rxd,frag_count,len) ; |
1256 | } |
1257 | else { |
1258 | if (!(mb = smt_get_mbuf(smc))) { |
1259 | smc->hw.fp.err_stats.err_no_buf++ ; |
1260 | DB_RX(4, "No SMbuf; receive terminated" ); |
1261 | goto abort_frame ; |
1262 | } |
1263 | data = smtod(mb,char *) - 1 ; |
1264 | |
1265 | /* |
1266 | * copy the frame into a SMT_MBuf |
1267 | */ |
1268 | #ifdef USE_OS_CPY |
1269 | hwm_cpy_rxd2mb(rxd,data,len) ; |
1270 | #else |
1271 | for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ |
1272 | n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; |
1273 | DB_RX(6, "cp SMT frame to mb: len = %d" , n); |
1274 | memcpy(data,r->rxd_virt,n) ; |
1275 | data += n ; |
1276 | } |
1277 | data = smtod(mb,char *) - 1 ; |
1278 | #endif |
1279 | fc = *(char *)mb->sm_data = *data ; |
1280 | mb->sm_len = len - 1 ; /* len - fc */ |
1281 | data++ ; |
1282 | |
1283 | /* |
1284 | * SMT frame received |
1285 | */ |
1286 | switch(fc) { |
1287 | case FC_SMT_INFO : |
1288 | smc->hw.fp.err_stats.err_smt_frame++ ; |
1289 | DB_RX(5, "SMT frame received" ); |
1290 | |
1291 | if (smc->os.hwm.pass_SMT) { |
1292 | DB_RX(5, "pass SMT frame" ); |
1293 | mac_drv_rx_complete(smc, rxd, |
1294 | frag_count,len) ; |
1295 | } |
1296 | else { |
1297 | DB_RX(5, "requeue RxD" ); |
1298 | mac_drv_requeue_rxd(smc,rxd,frag_count); |
1299 | } |
1300 | |
1301 | smt_received_pack(smc,mb,fs: (int)(rfsw>>25)) ; |
1302 | break ; |
1303 | case FC_SMT_NSA : |
1304 | smc->hw.fp.err_stats.err_smt_frame++ ; |
1305 | DB_RX(5, "SMT frame received" ); |
1306 | |
1307 | /* if pass_NSA set pass the NSA frame or */ |
1308 | /* pass_SMT set and the A-Indicator */ |
1309 | /* is not set, pass the NSA frame */ |
1310 | if (smc->os.hwm.pass_NSA || |
1311 | (smc->os.hwm.pass_SMT && |
1312 | !(rfsw & A_INDIC))) { |
1313 | DB_RX(5, "pass SMT frame" ); |
1314 | mac_drv_rx_complete(smc, rxd, |
1315 | frag_count,len) ; |
1316 | } |
1317 | else { |
1318 | DB_RX(5, "requeue RxD" ); |
1319 | mac_drv_requeue_rxd(smc,rxd,frag_count); |
1320 | } |
1321 | |
1322 | smt_received_pack(smc,mb,fs: (int)(rfsw>>25)) ; |
1323 | break ; |
1324 | case FC_BEACON : |
1325 | if (smc->os.hwm.pass_DB) { |
1326 | DB_RX(5, "pass DB frame" ); |
1327 | mac_drv_rx_complete(smc, rxd, |
1328 | frag_count,len) ; |
1329 | } |
1330 | else { |
1331 | DB_RX(5, "requeue RxD" ); |
1332 | mac_drv_requeue_rxd(smc,rxd,frag_count); |
1333 | } |
1334 | smt_free_mbuf(smc,mb) ; |
1335 | break ; |
1336 | default : |
1337 | /* |
1338 | * unknown FC abort the frame |
1339 | */ |
1340 | DB_RX(2, "unknown FC error" ); |
1341 | smt_free_mbuf(smc,mb) ; |
1342 | DB_RX(5, "requeue RxD" ); |
1343 | mac_drv_requeue_rxd(smc,rxd,frag_count) ; |
1344 | if ((fc & 0xf0) == FC_MAC) |
1345 | smc->hw.fp.err_stats.err_mac_frame++ ; |
1346 | else |
1347 | smc->hw.fp.err_stats.err_imp_frame++ ; |
1348 | |
1349 | break ; |
1350 | } |
1351 | } |
1352 | |
1353 | DB_RX(3, "next RxD is %p" , queue->rx_curr_get); |
1354 | NDD_TRACE("RHx1" ,queue->rx_curr_get,0,0) ; |
1355 | |
1356 | continue ; |
1357 | /*--------------------------------------------------------------------*/ |
1358 | abort_frame: |
1359 | DB_RX(5, "requeue RxD" ); |
1360 | mac_drv_requeue_rxd(smc,rxd,frag_count) ; |
1361 | |
1362 | DB_RX(3, "next RxD is %p" , queue->rx_curr_get); |
1363 | NDD_TRACE("RHx2" ,queue->rx_curr_get,0,0) ; |
1364 | } |
1365 | rx_end: |
1366 | #ifdef ALL_RX_COMPLETE |
1367 | mac_drv_all_receives_complete(smc) ; |
1368 | #endif |
1369 | return ; /* lint bug: needs return detect end of function */ |
1370 | } |
1371 | |
1372 | static void smt_to_llc(struct s_smc *smc, SMbuf *mb) |
1373 | { |
1374 | u_char fc ; |
1375 | |
1376 | DB_RX(4, "send a queued frame to the llc layer" ); |
1377 | smc->os.hwm.r.len = mb->sm_len ; |
1378 | smc->os.hwm.r.mb_pos = smtod(mb,char *) ; |
1379 | fc = *smc->os.hwm.r.mb_pos ; |
1380 | (void)mac_drv_rx_init(smc,len: (int)mb->sm_len,fc: (int)fc, |
1381 | look_ahead: smc->os.hwm.r.mb_pos,la_len: (int)mb->sm_len) ; |
1382 | smt_free_mbuf(smc,mb) ; |
1383 | } |
1384 | |
1385 | /* |
1386 | * BEGIN_MANUAL_ENTRY(hwm_rx_frag) |
1387 | * void hwm_rx_frag(smc,virt,phys,len,frame_status) |
1388 | * |
1389 | * function MACRO (hardware module, hwmtm.h) |
1390 | * This function calls dma_master for preparing the |
1391 | * system hardware for the DMA transfer and initializes |
1392 | * the current RxD with the length and the physical and |
1393 | * virtual address of the fragment. Furthermore, it sets the |
1394 | * STF and EOF bits depending on the frame status byte, |
1395 | * switches the OWN flag of the RxD, so that it is owned by the |
1396 | * adapter and issues an rx_start. |
1397 | * |
1398 | * para virt virtual pointer to the fragment |
1399 | * len the length of the fragment |
1400 | * frame_status status of the frame, see design description |
1401 | * |
1402 | * NOTE: It is possible to call this function with a fragment length |
1403 | * of zero. |
1404 | * |
1405 | * END_MANUAL_ENTRY |
1406 | */ |
1407 | void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, |
1408 | int frame_status) |
1409 | { |
1410 | struct s_smt_fp_rxd volatile *r ; |
1411 | __le32 rbctrl; |
1412 | |
1413 | NDD_TRACE("RHfB" ,virt,len,frame_status) ; |
1414 | DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x" , len, frame_status); |
1415 | r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; |
1416 | r->rxd_virt = virt ; |
1417 | r->rxd_rbadr = cpu_to_le32(phys) ; |
1418 | rbctrl = cpu_to_le32( (((__u32)frame_status & |
1419 | (FIRST_FRAG|LAST_FRAG))<<26) | |
1420 | (((u_long) frame_status & FIRST_FRAG) << 21) | |
1421 | BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ; |
1422 | r->rxd_rbctrl = rbctrl ; |
1423 | |
1424 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; |
1425 | outpd(ADDR(B0_R1_CSR),CSR_START) ; |
1426 | smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; |
1427 | smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; |
1428 | smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; |
1429 | NDD_TRACE("RHfE" ,r,le32_to_cpu(r->rxd_rbadr),0) ; |
1430 | } |
1431 | |
1432 | /* |
1433 | * BEGINN_MANUAL_ENTRY(mac_drv_clear_rx_queue) |
1434 | * |
1435 | * void mac_drv_clear_rx_queue(smc) |
1436 | * struct s_smc *smc ; |
1437 | * |
1438 | * function DOWNCALL (hardware module, hwmtm.c) |
1439 | * mac_drv_clear_rx_queue is called by the OS-specific module |
1440 | * after it has issued a card_stop. |
1441 | * In this case, the frames in the receive queue are obsolete and |
1442 | * should be removed. For removing mac_drv_clear_rx_queue |
1443 | * calls dma_master for each RxD and mac_drv_clear_rxd for each |
1444 | * receive buffer. |
1445 | * |
1446 | * NOTE: calling sequence card_stop: |
1447 | * CLI_FBI(), card_stop(), |
1448 | * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(), |
1449 | * |
1450 | * NOTE: The caller is responsible that the BMUs are idle |
1451 | * when this function is called. |
1452 | * |
1453 | * END_MANUAL_ENTRY |
1454 | */ |
1455 | void mac_drv_clear_rx_queue(struct s_smc *smc) |
1456 | { |
1457 | struct s_smt_fp_rxd volatile *r ; |
1458 | struct s_smt_fp_rxd volatile *next_rxd ; |
1459 | struct s_smt_rx_queue *queue ; |
1460 | int frag_count ; |
1461 | int i ; |
1462 | |
1463 | if (smc->hw.hw_state != STOPPED) { |
1464 | SK_BREAK() ; |
1465 | SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ; |
1466 | return ; |
1467 | } |
1468 | |
1469 | queue = smc->hw.fp.rx[QUEUE_R1] ; |
1470 | DB_RX(5, "clear_rx_queue" ); |
1471 | |
1472 | /* |
1473 | * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers |
1474 | */ |
1475 | r = queue->rx_curr_get ; |
1476 | while (queue->rx_used) { |
1477 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1478 | DB_RX(5, "switch OWN bit of RxD 0x%p" , r); |
1479 | r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; |
1480 | frag_count = 1 ; |
1481 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; |
1482 | r = r->rxd_next ; |
1483 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1484 | while (r != queue->rx_curr_put && |
1485 | !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { |
1486 | DB_RX(5, "Check STF bit in %p" , r); |
1487 | r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; |
1488 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; |
1489 | r = r->rxd_next ; |
1490 | DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; |
1491 | frag_count++ ; |
1492 | } |
1493 | DB_RX(5, "STF bit found" ); |
1494 | next_rxd = r ; |
1495 | |
1496 | for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){ |
1497 | DB_RX(5, "dma_complete for RxD %p" , r); |
1498 | dma_complete(smc,descr: (union s_fp_descr volatile *)r,DMA_WR); |
1499 | } |
1500 | |
1501 | DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d" , |
1502 | queue->rx_curr_get, frag_count); |
1503 | mac_drv_clear_rxd(smc,rxd: queue->rx_curr_get,frag_count) ; |
1504 | |
1505 | queue->rx_curr_get = next_rxd ; |
1506 | queue->rx_used -= frag_count ; |
1507 | queue->rx_free += frag_count ; |
1508 | } |
1509 | } |
1510 | |
1511 | |
1512 | /* |
1513 | ------------------------------------------------------------- |
1514 | SEND FUNCTIONS: |
1515 | ------------------------------------------------------------- |
1516 | */ |
1517 | |
1518 | /* |
1519 | * BEGIN_MANUAL_ENTRY(hwm_tx_init) |
1520 | * int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status) |
1521 | * |
1522 | * function DOWN_CALL (hardware module, hwmtm.c) |
1523 | * hwm_tx_init checks if the frame can be sent through the |
1524 | * corresponding send queue. |
1525 | * |
1526 | * para fc the frame control. To determine through which |
1527 | * send queue the frame should be transmitted. |
1528 | * 0x50 - 0x57: asynchronous LLC frame |
1529 | * 0xD0 - 0xD7: synchronous LLC frame |
1530 | * 0x41, 0x4F: SMT frame to the network |
1531 | * 0x42: SMT frame to the network and to the local SMT |
1532 | * 0x43: SMT frame to the local SMT |
1533 | * frag_count count of the fragments for this frame |
1534 | * frame_len length of the frame |
1535 | * frame_status status of the frame, the send queue bit is already |
1536 | * specified |
1537 | * |
1538 | * return frame_status |
1539 | * |
1540 | * END_MANUAL_ENTRY |
1541 | */ |
1542 | int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, |
1543 | int frame_status) |
1544 | { |
1545 | NDD_TRACE("THiB" ,fc,frag_count,frame_len) ; |
1546 | smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ; |
1547 | smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ; |
1548 | smc->os.hwm.tx_len = frame_len ; |
1549 | DB_TX(3, "hwm_tx_init: fc = %x, len = %d" , fc, frame_len); |
1550 | if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) { |
1551 | frame_status |= LAN_TX ; |
1552 | } |
1553 | else { |
1554 | switch (fc) { |
1555 | case FC_SMT_INFO : |
1556 | case FC_SMT_NSA : |
1557 | frame_status |= LAN_TX ; |
1558 | break ; |
1559 | case FC_SMT_LOC : |
1560 | frame_status |= LOC_TX ; |
1561 | break ; |
1562 | case FC_SMT_LAN_LOC : |
1563 | frame_status |= LAN_TX | LOC_TX ; |
1564 | break ; |
1565 | default : |
1566 | SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ; |
1567 | } |
1568 | } |
1569 | if (!smc->hw.mac_ring_is_up) { |
1570 | frame_status &= ~LAN_TX ; |
1571 | frame_status |= RING_DOWN ; |
1572 | DB_TX(2, "Ring is down: terminate LAN_TX" ); |
1573 | } |
1574 | if (frag_count > smc->os.hwm.tx_p->tx_free) { |
1575 | #ifndef NDIS_OS2 |
1576 | mac_drv_clear_txd(smc) ; |
1577 | if (frag_count > smc->os.hwm.tx_p->tx_free) { |
1578 | DB_TX(2, "Out of TxDs, terminate LAN_TX" ); |
1579 | frame_status &= ~LAN_TX ; |
1580 | frame_status |= OUT_OF_TXD ; |
1581 | } |
1582 | #else |
1583 | DB_TX(2, "Out of TxDs, terminate LAN_TX" ); |
1584 | frame_status &= ~LAN_TX ; |
1585 | frame_status |= OUT_OF_TXD ; |
1586 | #endif |
1587 | } |
1588 | DB_TX(3, "frame_status = %x" , frame_status); |
1589 | NDD_TRACE("THiE" ,frame_status,smc->os.hwm.tx_p->tx_free,0) ; |
1590 | return frame_status; |
1591 | } |
1592 | |
1593 | /* |
1594 | * BEGIN_MANUAL_ENTRY(hwm_tx_frag) |
1595 | * void hwm_tx_frag(smc,virt,phys,len,frame_status) |
1596 | * |
1597 | * function DOWNCALL (hardware module, hwmtm.c) |
1598 | * If the frame should be sent to the LAN, this function calls |
1599 | * dma_master, fills the current TxD with the virtual and the |
1600 | * physical address, sets the STF and EOF bits dependent on |
1601 | * the frame status, and requests the BMU to start the |
1602 | * transmit. |
1603 | * If the frame should be sent to the local SMT, an SMT_MBuf |
1604 | * is allocated if the FIRST_FRAG bit is set in the frame_status. |
1605 | * The fragment of the frame is copied into the SMT MBuf. |
1606 | * The function smt_received_pack is called if the LAST_FRAG |
1607 | * bit is set in the frame_status word. |
1608 | * |
1609 | * para virt virtual pointer to the fragment |
1610 | * len the length of the fragment |
1611 | * frame_status status of the frame, see design description |
1612 | * |
1613 | * return nothing returned, no parameter is modified |
1614 | * |
1615 | * NOTE: It is possible to invoke this macro with a fragment length |
1616 | * of zero. |
1617 | * |
1618 | * END_MANUAL_ENTRY |
1619 | */ |
1620 | void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, |
1621 | int frame_status) |
1622 | { |
1623 | struct s_smt_fp_txd volatile *t ; |
1624 | struct s_smt_tx_queue *queue ; |
1625 | __le32 tbctrl ; |
1626 | |
1627 | queue = smc->os.hwm.tx_p ; |
1628 | |
1629 | NDD_TRACE("THfB" ,virt,len,frame_status) ; |
1630 | /* Bug fix: AF / May 31 1999 (#missing) |
1631 | * snmpinfo problem reported by IBM is caused by invalid |
1632 | * t-pointer (txd) if LAN_TX is not set but LOC_TX only. |
1633 | * Set: t = queue->tx_curr_put here ! |
1634 | */ |
1635 | t = queue->tx_curr_put ; |
1636 | |
1637 | DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x" , len, frame_status); |
1638 | if (frame_status & LAN_TX) { |
1639 | /* '*t' is already defined */ |
1640 | DB_TX(3, "LAN_TX: TxD = %p, virt = %p" , t, virt); |
1641 | t->txd_virt = virt ; |
1642 | t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ; |
1643 | t->txd_tbadr = cpu_to_le32(phys) ; |
1644 | tbctrl = cpu_to_le32((((__u32)frame_status & |
1645 | (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) | |
1646 | BMU_OWN|BMU_CHECK |len) ; |
1647 | t->txd_tbctrl = tbctrl ; |
1648 | |
1649 | #ifndef AIX |
1650 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
1651 | outpd(queue->tx_bmu_ctl,CSR_START) ; |
1652 | #else /* ifndef AIX */ |
1653 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
1654 | if (frame_status & QUEUE_A0) { |
1655 | outpd(ADDR(B0_XA_CSR),CSR_START) ; |
1656 | } |
1657 | else { |
1658 | outpd(ADDR(B0_XS_CSR),CSR_START) ; |
1659 | } |
1660 | #endif |
1661 | queue->tx_free-- ; |
1662 | queue->tx_used++ ; |
1663 | queue->tx_curr_put = t->txd_next ; |
1664 | if (frame_status & LAST_FRAG) { |
1665 | smc->mib.m[MAC0].fddiMACTransmit_Ct++ ; |
1666 | } |
1667 | } |
1668 | if (frame_status & LOC_TX) { |
1669 | DB_TX(3, "LOC_TX:" ); |
1670 | if (frame_status & FIRST_FRAG) { |
1671 | if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) { |
1672 | smc->hw.fp.err_stats.err_no_buf++ ; |
1673 | DB_TX(4, "No SMbuf; transmit terminated" ); |
1674 | } |
1675 | else { |
1676 | smc->os.hwm.tx_data = |
1677 | smtod(smc->os.hwm.tx_mb,char *) - 1 ; |
1678 | #ifdef USE_OS_CPY |
1679 | #ifdef PASS_1ST_TXD_2_TX_COMP |
1680 | hwm_cpy_txd2mb(t,smc->os.hwm.tx_data, |
1681 | smc->os.hwm.tx_len) ; |
1682 | #endif |
1683 | #endif |
1684 | } |
1685 | } |
1686 | if (smc->os.hwm.tx_mb) { |
1687 | #ifndef USE_OS_CPY |
1688 | DB_TX(3, "copy fragment into MBuf" ); |
1689 | memcpy(smc->os.hwm.tx_data,virt,len) ; |
1690 | smc->os.hwm.tx_data += len ; |
1691 | #endif |
1692 | if (frame_status & LAST_FRAG) { |
1693 | #ifdef USE_OS_CPY |
1694 | #ifndef PASS_1ST_TXD_2_TX_COMP |
1695 | /* |
1696 | * hwm_cpy_txd2mb(txd,data,len) copies 'len' |
1697 | * bytes from the virtual pointer in 'rxd' |
1698 | * to 'data'. The virtual pointer of the |
1699 | * os-specific tx-buffer should be written |
1700 | * in the LAST txd. |
1701 | */ |
1702 | hwm_cpy_txd2mb(t,smc->os.hwm.tx_data, |
1703 | smc->os.hwm.tx_len) ; |
1704 | #endif /* nPASS_1ST_TXD_2_TX_COMP */ |
1705 | #endif /* USE_OS_CPY */ |
1706 | smc->os.hwm.tx_data = |
1707 | smtod(smc->os.hwm.tx_mb,char *) - 1 ; |
1708 | *(char *)smc->os.hwm.tx_mb->sm_data = |
1709 | *smc->os.hwm.tx_data ; |
1710 | smc->os.hwm.tx_data++ ; |
1711 | smc->os.hwm.tx_mb->sm_len = |
1712 | smc->os.hwm.tx_len - 1 ; |
1713 | DB_TX(3, "pass LLC frame to SMT" ); |
1714 | smt_received_pack(smc,mb: smc->os.hwm.tx_mb, |
1715 | RD_FS_LOCAL) ; |
1716 | } |
1717 | } |
1718 | } |
1719 | NDD_TRACE("THfE" ,t,queue->tx_free,0) ; |
1720 | } |
1721 | |
1722 | |
1723 | /* |
1724 | * queues a receive for later send |
1725 | */ |
1726 | static void queue_llc_rx(struct s_smc *smc, SMbuf *mb) |
1727 | { |
1728 | DB_GEN(4, "queue_llc_rx: mb = %p" , mb); |
1729 | smc->os.hwm.queued_rx_frames++ ; |
1730 | mb->sm_next = (SMbuf *)NULL ; |
1731 | if (smc->os.hwm.llc_rx_pipe == NULL) { |
1732 | smc->os.hwm.llc_rx_pipe = mb ; |
1733 | } |
1734 | else { |
1735 | smc->os.hwm.llc_rx_tail->sm_next = mb ; |
1736 | } |
1737 | smc->os.hwm.llc_rx_tail = mb ; |
1738 | |
1739 | /* |
1740 | * force an timer IRQ to receive the data |
1741 | */ |
1742 | if (!smc->os.hwm.isr_flag) { |
1743 | smt_force_irq(smc) ; |
1744 | } |
1745 | } |
1746 | |
1747 | /* |
1748 | * get a SMbuf from the llc_rx_queue |
1749 | */ |
1750 | static SMbuf *get_llc_rx(struct s_smc *smc) |
1751 | { |
1752 | SMbuf *mb ; |
1753 | |
1754 | if ((mb = smc->os.hwm.llc_rx_pipe)) { |
1755 | smc->os.hwm.queued_rx_frames-- ; |
1756 | smc->os.hwm.llc_rx_pipe = mb->sm_next ; |
1757 | } |
1758 | DB_GEN(4, "get_llc_rx: mb = 0x%p" , mb); |
1759 | return mb; |
1760 | } |
1761 | |
1762 | /* |
1763 | * queues a transmit SMT MBuf during the time were the MBuf is |
1764 | * queued the TxD ring |
1765 | */ |
1766 | static void queue_txd_mb(struct s_smc *smc, SMbuf *mb) |
1767 | { |
1768 | DB_GEN(4, "_rx: queue_txd_mb = %p" , mb); |
1769 | smc->os.hwm.queued_txd_mb++ ; |
1770 | mb->sm_next = (SMbuf *)NULL ; |
1771 | if (smc->os.hwm.txd_tx_pipe == NULL) { |
1772 | smc->os.hwm.txd_tx_pipe = mb ; |
1773 | } |
1774 | else { |
1775 | smc->os.hwm.txd_tx_tail->sm_next = mb ; |
1776 | } |
1777 | smc->os.hwm.txd_tx_tail = mb ; |
1778 | } |
1779 | |
1780 | /* |
1781 | * get a SMbuf from the txd_tx_queue |
1782 | */ |
1783 | static SMbuf *get_txd_mb(struct s_smc *smc) |
1784 | { |
1785 | SMbuf *mb ; |
1786 | |
1787 | if ((mb = smc->os.hwm.txd_tx_pipe)) { |
1788 | smc->os.hwm.queued_txd_mb-- ; |
1789 | smc->os.hwm.txd_tx_pipe = mb->sm_next ; |
1790 | } |
1791 | DB_GEN(4, "get_txd_mb: mb = 0x%p" , mb); |
1792 | return mb; |
1793 | } |
1794 | |
1795 | /* |
1796 | * SMT Send function |
1797 | */ |
1798 | void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) |
1799 | { |
1800 | char far *data ; |
1801 | int len ; |
1802 | int n ; |
1803 | int i ; |
1804 | int frag_count ; |
1805 | int frame_status ; |
1806 | SK_LOC_DECL(char far,*virt[3]) ; |
1807 | int frag_len[3] ; |
1808 | struct s_smt_tx_queue *queue ; |
1809 | struct s_smt_fp_txd volatile *t ; |
1810 | u_long phys ; |
1811 | __le32 tbctrl; |
1812 | |
1813 | NDD_TRACE("THSB" ,mb,fc,0) ; |
1814 | DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x" , mb, fc); |
1815 | |
1816 | mb->sm_off-- ; /* set to fc */ |
1817 | mb->sm_len++ ; /* + fc */ |
1818 | data = smtod(mb,char *) ; |
1819 | *data = fc ; |
1820 | if (fc == FC_SMT_LOC) |
1821 | *data = FC_SMT_INFO ; |
1822 | |
1823 | /* |
1824 | * determine the frag count and the virt addresses of the frags |
1825 | */ |
1826 | frag_count = 0 ; |
1827 | len = mb->sm_len ; |
1828 | while (len) { |
1829 | n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ; |
1830 | if (n >= len) { |
1831 | n = len ; |
1832 | } |
1833 | DB_TX(5, "frag: virt/len = 0x%p/%d" , data, n); |
1834 | virt[frag_count] = data ; |
1835 | frag_len[frag_count] = n ; |
1836 | frag_count++ ; |
1837 | len -= n ; |
1838 | data += n ; |
1839 | } |
1840 | |
1841 | /* |
1842 | * determine the frame status |
1843 | */ |
1844 | queue = smc->hw.fp.tx[QUEUE_A0] ; |
1845 | if (fc == FC_BEACON || fc == FC_SMT_LOC) { |
1846 | frame_status = LOC_TX ; |
1847 | } |
1848 | else { |
1849 | frame_status = LAN_TX ; |
1850 | if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) || |
1851 | (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO))) |
1852 | frame_status |= LOC_TX ; |
1853 | } |
1854 | |
1855 | if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) { |
1856 | frame_status &= ~LAN_TX; |
1857 | if (frame_status) { |
1858 | DB_TX(2, "Ring is down: terminate LAN_TX" ); |
1859 | } |
1860 | else { |
1861 | DB_TX(2, "Ring is down: terminate transmission" ); |
1862 | smt_free_mbuf(smc,mb) ; |
1863 | return ; |
1864 | } |
1865 | } |
1866 | DB_TX(5, "frame_status = 0x%x" , frame_status); |
1867 | |
1868 | if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) { |
1869 | mb->sm_use_count = 2 ; |
1870 | } |
1871 | |
1872 | if (frame_status & LAN_TX) { |
1873 | t = queue->tx_curr_put ; |
1874 | frame_status |= FIRST_FRAG ; |
1875 | for (i = 0; i < frag_count; i++) { |
1876 | DB_TX(5, "init TxD = 0x%p" , t); |
1877 | if (i == frag_count-1) { |
1878 | frame_status |= LAST_FRAG ; |
1879 | t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR | |
1880 | (((__u32)(mb->sm_len-1)&3) << 27)) ; |
1881 | } |
1882 | t->txd_virt = virt[i] ; |
1883 | phys = dma_master(smc, virt: (void far *)virt[i], |
1884 | len: frag_len[i], DMA_RD|SMT_BUF) ; |
1885 | t->txd_tbadr = cpu_to_le32(phys) ; |
1886 | tbctrl = cpu_to_le32((((__u32)frame_status & |
1887 | (FIRST_FRAG|LAST_FRAG)) << 26) | |
1888 | BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ; |
1889 | t->txd_tbctrl = tbctrl ; |
1890 | #ifndef AIX |
1891 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
1892 | outpd(queue->tx_bmu_ctl,CSR_START) ; |
1893 | #else |
1894 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
1895 | outpd(ADDR(B0_XA_CSR),CSR_START) ; |
1896 | #endif |
1897 | frame_status &= ~FIRST_FRAG ; |
1898 | queue->tx_curr_put = t = t->txd_next ; |
1899 | queue->tx_free-- ; |
1900 | queue->tx_used++ ; |
1901 | } |
1902 | smc->mib.m[MAC0].fddiMACTransmit_Ct++ ; |
1903 | queue_txd_mb(smc,mb) ; |
1904 | } |
1905 | |
1906 | if (frame_status & LOC_TX) { |
1907 | DB_TX(5, "pass Mbuf to LLC queue" ); |
1908 | queue_llc_rx(smc,mb) ; |
1909 | } |
1910 | |
1911 | /* |
1912 | * We need to unqueue the free SMT_MBUFs here, because it may |
1913 | * be that the SMT want's to send more than 1 frame for one down call |
1914 | */ |
1915 | mac_drv_clear_txd(smc) ; |
1916 | NDD_TRACE("THSE" ,t,queue->tx_free,frag_count) ; |
1917 | } |
1918 | |
1919 | /* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd) |
1920 | * void mac_drv_clear_txd(smc) |
1921 | * |
1922 | * function DOWNCALL (hardware module, hwmtm.c) |
1923 | * mac_drv_clear_txd searches in both send queues for TxD's |
1924 | * which were finished by the adapter. It calls dma_complete |
1925 | * for each TxD. If the last fragment of an LLC frame is |
1926 | * reached, it calls mac_drv_tx_complete to release the |
1927 | * send buffer. |
1928 | * |
1929 | * return nothing |
1930 | * |
1931 | * END_MANUAL_ENTRY |
1932 | */ |
1933 | static void mac_drv_clear_txd(struct s_smc *smc) |
1934 | { |
1935 | struct s_smt_tx_queue *queue ; |
1936 | struct s_smt_fp_txd volatile *t1 ; |
1937 | struct s_smt_fp_txd volatile *t2 = NULL ; |
1938 | SMbuf *mb ; |
1939 | u_long tbctrl ; |
1940 | int i ; |
1941 | int frag_count ; |
1942 | int n ; |
1943 | |
1944 | NDD_TRACE("THcB" ,0,0,0) ; |
1945 | for (i = QUEUE_S; i <= QUEUE_A0; i++) { |
1946 | queue = smc->hw.fp.tx[i] ; |
1947 | t1 = queue->tx_curr_get ; |
1948 | DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)" , i); |
1949 | |
1950 | for ( ; ; ) { |
1951 | frag_count = 0 ; |
1952 | |
1953 | do { |
1954 | DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; |
1955 | DB_TX(5, "check OWN/EOF bit of TxD 0x%p" , t1); |
1956 | tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl)); |
1957 | |
1958 | if (tbctrl & BMU_OWN || !queue->tx_used){ |
1959 | DB_TX(4, "End of TxDs queue %d" , i); |
1960 | goto free_next_queue ; /* next queue */ |
1961 | } |
1962 | t1 = t1->txd_next ; |
1963 | frag_count++ ; |
1964 | } while (!(tbctrl & BMU_EOF)) ; |
1965 | |
1966 | t1 = queue->tx_curr_get ; |
1967 | for (n = frag_count; n; n--) { |
1968 | tbctrl = le32_to_cpu(t1->txd_tbctrl) ; |
1969 | dma_complete(smc, |
1970 | descr: (union s_fp_descr volatile *) t1, |
1971 | flag: (int) (DMA_RD | |
1972 | ((tbctrl & BMU_SMT_TX) >> 18))) ; |
1973 | t2 = t1 ; |
1974 | t1 = t1->txd_next ; |
1975 | } |
1976 | |
1977 | if (tbctrl & BMU_SMT_TX) { |
1978 | mb = get_txd_mb(smc) ; |
1979 | smt_free_mbuf(smc,mb) ; |
1980 | } |
1981 | else { |
1982 | #ifndef PASS_1ST_TXD_2_TX_COMP |
1983 | DB_TX(4, "mac_drv_tx_comp for TxD 0x%p" , t2); |
1984 | mac_drv_tx_complete(smc,txd: t2) ; |
1985 | #else |
1986 | DB_TX(4, "mac_drv_tx_comp for TxD 0x%x" , |
1987 | queue->tx_curr_get); |
1988 | mac_drv_tx_complete(smc,queue->tx_curr_get) ; |
1989 | #endif |
1990 | } |
1991 | queue->tx_curr_get = t1 ; |
1992 | queue->tx_free += frag_count ; |
1993 | queue->tx_used -= frag_count ; |
1994 | } |
1995 | free_next_queue: ; |
1996 | } |
1997 | NDD_TRACE("THcE" ,0,0,0) ; |
1998 | } |
1999 | |
2000 | /* |
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
2002 | * |
2003 | * void mac_drv_clear_tx_queue(smc) |
2004 | * struct s_smc *smc ; |
2005 | * |
2006 | * function DOWNCALL (hardware module, hwmtm.c) |
2007 | * mac_drv_clear_tx_queue is called from the SMT when |
2008 | * the RMT state machine has entered the ISOLATE state. |
2009 | * This function is also called by the os-specific module |
2010 | * after it has called the function card_stop(). |
2011 | * In this case, the frames in the send queues are obsolete and |
2012 | * should be removed. |
2013 | * |
2014 | * note calling sequence: |
2015 | * CLI_FBI(), card_stop(), |
2016 | * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(), |
2017 | * |
2018 | * NOTE: The caller is responsible that the BMUs are idle |
2019 | * when this function is called. |
2020 | * |
2021 | * END_MANUAL_ENTRY |
2022 | */ |
2023 | void mac_drv_clear_tx_queue(struct s_smc *smc) |
2024 | { |
2025 | struct s_smt_fp_txd volatile *t ; |
2026 | struct s_smt_tx_queue *queue ; |
2027 | int tx_used ; |
2028 | int i ; |
2029 | |
2030 | if (smc->hw.hw_state != STOPPED) { |
2031 | SK_BREAK() ; |
2032 | SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ; |
2033 | return ; |
2034 | } |
2035 | |
2036 | for (i = QUEUE_S; i <= QUEUE_A0; i++) { |
2037 | queue = smc->hw.fp.tx[i] ; |
2038 | DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)" , i); |
2039 | |
2040 | /* |
2041 | * switch the OWN bit of all pending frames to the host |
2042 | */ |
2043 | t = queue->tx_curr_get ; |
2044 | tx_used = queue->tx_used ; |
2045 | while (tx_used) { |
2046 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; |
2047 | DB_TX(5, "switch OWN bit of TxD 0x%p" , t); |
2048 | t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; |
2049 | DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; |
2050 | t = t->txd_next ; |
2051 | tx_used-- ; |
2052 | } |
2053 | } |
2054 | |
2055 | /* |
2056 | * release all TxD's for both send queues |
2057 | */ |
2058 | mac_drv_clear_txd(smc) ; |
2059 | |
2060 | for (i = QUEUE_S; i <= QUEUE_A0; i++) { |
2061 | queue = smc->hw.fp.tx[i] ; |
2062 | t = queue->tx_curr_get ; |
2063 | |
2064 | /* |
2065 | * write the phys pointer of the NEXT descriptor into the |
2066 | * BMU's current address descriptor pointer and set |
2067 | * tx_curr_get and tx_curr_put to this position |
2068 | */ |
2069 | if (i == QUEUE_S) { |
2070 | outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ; |
2071 | } |
2072 | else { |
2073 | outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ; |
2074 | } |
2075 | |
2076 | queue->tx_curr_put = queue->tx_curr_get->txd_next ; |
2077 | queue->tx_curr_get = queue->tx_curr_put ; |
2078 | } |
2079 | } |
2080 | |
2081 | |
2082 | /* |
2083 | ------------------------------------------------------------- |
2084 | TEST FUNCTIONS: |
2085 | ------------------------------------------------------------- |
2086 | */ |
2087 | |
2088 | #ifdef DEBUG |
2089 | /* |
2090 | * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev) |
2091 | * void mac_drv_debug_lev(smc,flag,lev) |
2092 | * |
2093 | * function DOWNCALL (drvsr.c) |
2094 | * To get a special debug info the user can assign a debug level |
2095 | * to any debug flag. |
2096 | * |
2097 | * para flag debug flag, possible values are: |
2098 | * = 0: reset all debug flags (the defined level is |
2099 | * ignored) |
2100 | * = 1: debug.d_smtf |
2101 | * = 2: debug.d_smt |
2102 | * = 3: debug.d_ecm |
2103 | * = 4: debug.d_rmt |
2104 | * = 5: debug.d_cfm |
2105 | * = 6: debug.d_pcm |
2106 | * |
2107 | * = 10: debug.d_os.hwm_rx (hardware module receive path) |
2108 | * = 11: debug.d_os.hwm_tx(hardware module transmit path) |
2109 | * = 12: debug.d_os.hwm_gen(hardware module general flag) |
2110 | * |
2111 | * lev debug level |
2112 | * |
2113 | * END_MANUAL_ENTRY |
2114 | */ |
2115 | void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev) |
2116 | { |
2117 | switch(flag) { |
2118 | case (int)NULL: |
2119 | DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ; |
2120 | DB_P.d_cfm = 0 ; |
2121 | DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ; |
2122 | #ifdef SBA |
2123 | DB_P.d_sba = 0 ; |
2124 | #endif |
2125 | #ifdef ESS |
2126 | DB_P.d_ess = 0 ; |
2127 | #endif |
2128 | break ; |
2129 | case DEBUG_SMTF: |
2130 | DB_P.d_smtf = lev ; |
2131 | break ; |
2132 | case DEBUG_SMT: |
2133 | DB_P.d_smt = lev ; |
2134 | break ; |
2135 | case DEBUG_ECM: |
2136 | DB_P.d_ecm = lev ; |
2137 | break ; |
2138 | case DEBUG_RMT: |
2139 | DB_P.d_rmt = lev ; |
2140 | break ; |
2141 | case DEBUG_CFM: |
2142 | DB_P.d_cfm = lev ; |
2143 | break ; |
2144 | case DEBUG_PCM: |
2145 | DB_P.d_pcm = lev ; |
2146 | break ; |
2147 | case DEBUG_SBA: |
2148 | #ifdef SBA |
2149 | DB_P.d_sba = lev ; |
2150 | #endif |
2151 | break ; |
2152 | case DEBUG_ESS: |
2153 | #ifdef ESS |
2154 | DB_P.d_ess = lev ; |
2155 | #endif |
2156 | break ; |
2157 | case DB_HWM_RX: |
2158 | DB_P.d_os.hwm_rx = lev ; |
2159 | break ; |
2160 | case DB_HWM_TX: |
2161 | DB_P.d_os.hwm_tx = lev ; |
2162 | break ; |
2163 | case DB_HWM_GEN: |
2164 | DB_P.d_os.hwm_gen = lev ; |
2165 | break ; |
2166 | default: |
2167 | break ; |
2168 | } |
2169 | } |
2170 | #endif |
2171 | |