1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Intel IXP4xx HSS (synchronous serial port) driver for Linux |
4 | * |
5 | * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl> |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | |
10 | #include <linux/module.h> |
11 | #include <linux/bitops.h> |
12 | #include <linux/cdev.h> |
13 | #include <linux/dma-mapping.h> |
14 | #include <linux/dmapool.h> |
15 | #include <linux/fs.h> |
16 | #include <linux/hdlc.h> |
17 | #include <linux/io.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/mfd/syscon.h> |
20 | #include <linux/platform_device.h> |
21 | #include <linux/poll.h> |
22 | #include <linux/regmap.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/gpio/consumer.h> |
25 | #include <linux/of.h> |
26 | #include <linux/soc/ixp4xx/npe.h> |
27 | #include <linux/soc/ixp4xx/qmgr.h> |
28 | #include <linux/soc/ixp4xx/cpu.h> |
29 | |
30 | /* This is what all IXP4xx platforms we know of use; if more frequencies |
31 | * are needed, we need to migrate to the clock framework. |
32 | */ |
33 | #define IXP4XX_TIMER_FREQ 66666000 |
34 | |
35 | #define DEBUG_DESC 0 |
36 | #define DEBUG_RX 0 |
37 | #define DEBUG_TX 0 |
38 | #define DEBUG_PKT_BYTES 0 |
39 | #define DEBUG_CLOSE 0 |
40 | |
41 | #define DRV_NAME "ixp4xx_hss" |
42 | |
43 | #define PKT_EXTRA_FLAGS 0 /* orig 1 */ |
44 | #define PKT_NUM_PIPES 1 /* 1, 2 or 4 */ |
45 | #define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */ |
46 | |
47 | #define RX_DESCS 16 /* also length of all RX queues */ |
48 | #define TX_DESCS 16 /* also length of all TX queues */ |
49 | |
50 | #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) |
51 | #define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */ |
52 | #define MAX_CLOSE_WAIT 1000 /* microseconds */ |
53 | #define HSS_COUNT 2 |
54 | #define FRAME_SIZE 256 /* doesn't matter at this point */ |
55 | #define FRAME_OFFSET 0 |
56 | #define MAX_CHANNELS (FRAME_SIZE / 8) |
57 | |
58 | #define NAPI_WEIGHT 16 |
59 | |
60 | /* Queue IDs */ |
61 | #define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */ |
62 | #define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */ |
63 | #define HSS0_PKT_TX1_QUEUE 15 |
64 | #define HSS0_PKT_TX2_QUEUE 16 |
65 | #define HSS0_PKT_TX3_QUEUE 17 |
66 | #define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */ |
67 | #define HSS0_PKT_RXFREE1_QUEUE 19 |
68 | #define HSS0_PKT_RXFREE2_QUEUE 20 |
69 | #define HSS0_PKT_RXFREE3_QUEUE 21 |
70 | #define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */ |
71 | |
72 | #define HSS1_PKT_RX_QUEUE 0 |
73 | #define HSS1_PKT_TX0_QUEUE 5 |
74 | #define HSS1_PKT_TX1_QUEUE 6 |
75 | #define HSS1_PKT_TX2_QUEUE 7 |
76 | #define HSS1_PKT_TX3_QUEUE 8 |
77 | #define HSS1_PKT_RXFREE0_QUEUE 1 |
78 | #define HSS1_PKT_RXFREE1_QUEUE 2 |
79 | #define HSS1_PKT_RXFREE2_QUEUE 3 |
80 | #define HSS1_PKT_RXFREE3_QUEUE 4 |
81 | #define HSS1_PKT_TXDONE_QUEUE 9 |
82 | |
83 | #define NPE_PKT_MODE_HDLC 0 |
84 | #define NPE_PKT_MODE_RAW 1 |
85 | #define NPE_PKT_MODE_56KMODE 2 |
86 | #define NPE_PKT_MODE_56KENDIAN_MSB 4 |
87 | |
88 | /* PKT_PIPE_HDLC_CFG_WRITE flags */ |
89 | #define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */ |
90 | #define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */ |
91 | #define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */ |
92 | |
93 | /* hss_config, PCRs */ |
94 | /* Frame sync sampling, default = active low */ |
95 | #define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000 |
96 | #define PCR_FRM_SYNC_FALLINGEDGE 0x80000000 |
97 | #define PCR_FRM_SYNC_RISINGEDGE 0xC0000000 |
98 | |
99 | /* Frame sync pin: input (default) or output generated off a given clk edge */ |
100 | #define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000 |
101 | #define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000 |
102 | |
103 | /* Frame and data clock sampling on edge, default = falling */ |
104 | #define PCR_FCLK_EDGE_RISING 0x08000000 |
105 | #define PCR_DCLK_EDGE_RISING 0x04000000 |
106 | |
107 | /* Clock direction, default = input */ |
108 | #define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000 |
109 | |
110 | /* Generate/Receive frame pulses, default = enabled */ |
111 | #define PCR_FRM_PULSE_DISABLED 0x01000000 |
112 | |
113 | /* Data rate is full (default) or half the configured clk speed */ |
114 | #define PCR_HALF_CLK_RATE 0x00200000 |
115 | |
116 | /* Invert data between NPE and HSS FIFOs? (default = no) */ |
117 | #define PCR_DATA_POLARITY_INVERT 0x00100000 |
118 | |
119 | /* TX/RX endianness, default = LSB */ |
120 | #define PCR_MSB_ENDIAN 0x00080000 |
121 | |
122 | /* Normal (default) / open drain mode (TX only) */ |
123 | #define PCR_TX_PINS_OPEN_DRAIN 0x00040000 |
124 | |
125 | /* No framing bit transmitted and expected on RX? (default = framing bit) */ |
126 | #define PCR_SOF_NO_FBIT 0x00020000 |
127 | |
128 | /* Drive data pins? */ |
129 | #define PCR_TX_DATA_ENABLE 0x00010000 |
130 | |
131 | /* Voice 56k type: drive the data pins low (default), high, high Z */ |
132 | #define PCR_TX_V56K_HIGH 0x00002000 |
133 | #define PCR_TX_V56K_HIGH_IMP 0x00004000 |
134 | |
135 | /* Unassigned type: drive the data pins low (default), high, high Z */ |
136 | #define PCR_TX_UNASS_HIGH 0x00000800 |
137 | #define PCR_TX_UNASS_HIGH_IMP 0x00001000 |
138 | |
139 | /* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */ |
140 | #define PCR_TX_FB_HIGH_IMP 0x00000400 |
141 | |
142 | /* 56k data endianness - which bit unused: high (default) or low */ |
143 | #define PCR_TX_56KE_BIT_0_UNUSED 0x00000200 |
144 | |
145 | /* 56k data transmission type: 32/8 bit data (default) or 56K data */ |
146 | #define PCR_TX_56KS_56K_DATA 0x00000100 |
147 | |
148 | /* hss_config, cCR */ |
149 | /* Number of packetized clients, default = 1 */ |
150 | #define CCR_NPE_HFIFO_2_HDLC 0x04000000 |
151 | #define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000 |
152 | |
153 | /* default = no loopback */ |
154 | #define CCR_LOOPBACK 0x02000000 |
155 | |
156 | /* HSS number, default = 0 (first) */ |
157 | #define CCR_SECOND_HSS 0x01000000 |
158 | |
159 | /* hss_config, clkCR: main:10, num:10, denom:12 */ |
160 | #define CLK42X_SPEED_EXP ((0x3FF << 22) | (2 << 12) | 15) /*65 KHz*/ |
161 | |
162 | #define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15) |
163 | #define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47) |
164 | #define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192) |
165 | #define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63) |
166 | #define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127) |
167 | #define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255) |
168 | |
169 | #define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127) |
170 | #define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383) |
171 | #define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385) |
172 | #define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511) |
173 | #define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023) |
174 | #define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047) |
175 | |
176 | /* HSS_CONFIG_CLOCK_CR register consists of 3 parts: |
177 | * A (10 bits), B (10 bits) and C (12 bits). |
178 | * IXP42x HSS clock generator operation (verified with an oscilloscope): |
179 | * Each clock bit takes 7.5 ns (1 / 133.xx MHz). |
180 | * The clock sequence consists of (C - B) states of 0s and 1s, each state is |
181 | * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is |
182 | * (A + 1) bits wide. |
183 | * |
184 | * The resulting average clock frequency (assuming 33.333 MHz oscillator) is: |
185 | * freq = 66.666 MHz / (A + (B + 1) / (C + 1)) |
186 | * minimum freq = 66.666 MHz / (A + 1) |
187 | * maximum freq = 66.666 MHz / A |
188 | * |
189 | * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7 |
190 | * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s). |
191 | * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples). |
192 | * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits |
193 | * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats). |
194 | * The sequence consists of 4 complete clock periods, thus the average |
195 | * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s). |
196 | * (max specified clock rate for IXP42x HSS is 8.192 Mb/s). |
197 | */ |
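/* Worked example (illustrative, derived from the formula above, not part of
 * the original code): CLK42X_SPEED_2048KHZ encodes A = 32, B = 34, C = 63,
 * giving 66.666 MHz / (32 + 35/64) ~= 2.048 MHz, i.e. within a few hundred
 * Hz of the nominal 2048 kHz rate.
 */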
198 | |
199 | /* hss_config, LUT entries */ |
200 | #define TDMMAP_UNASSIGNED 0 |
201 | #define TDMMAP_HDLC 1 /* HDLC - packetized */ |
202 | #define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */ |
203 | #define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */ |
204 | |
205 | /* offsets into HSS config */ |
206 | #define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */ |
207 | #define HSS_CONFIG_RX_PCR 0x04 |
208 | #define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */ |
209 | #define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */ |
210 | #define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */ |
211 | #define HSS_CONFIG_RX_FCR 0x14 |
212 | #define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */ |
213 | #define HSS_CONFIG_RX_LUT 0x38 |
214 | |
215 | /* NPE command codes */ |
216 | /* writes the ConfigWord value to the location specified by offset */ |
217 | #define PORT_CONFIG_WRITE 0x40 |
218 | |
219 | /* triggers the NPE to load the contents of the configuration table */ |
220 | #define PORT_CONFIG_LOAD 0x41 |
221 | |
222 | /* triggers the NPE to return an HssErrorReadResponse message */ |
223 | #define PORT_ERROR_READ 0x42 |
224 | |
225 | /* triggers the NPE to reset internal status and enable the HssPacketized |
226 | * operation for the flow specified by pPipe |
227 | */ |
228 | #define PKT_PIPE_FLOW_ENABLE 0x50 |
229 | #define PKT_PIPE_FLOW_DISABLE 0x51 |
230 | #define PKT_NUM_PIPES_WRITE 0x52 |
231 | #define PKT_PIPE_FIFO_SIZEW_WRITE 0x53 |
232 | #define PKT_PIPE_HDLC_CFG_WRITE 0x54 |
233 | #define PKT_PIPE_IDLE_PATTERN_WRITE 0x55 |
234 | #define PKT_PIPE_RX_SIZE_WRITE 0x56 |
235 | #define PKT_PIPE_MODE_WRITE 0x57 |
236 | |
237 | /* HDLC packet status values - desc->status */ |
238 | #define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */ |
239 | #define ERR_HDLC_ALIGN 2 /* HDLC alignment error */ |
240 | #define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */ |
241 | #define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving |
242 | * this packet (if buf_len < pkt_len) |
243 | */ |
244 | #define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */ |
245 | #define ERR_HDLC_ABORT 6 /* abort sequence received */ |
246 | #define ERR_DISCONNECTING 7 /* disconnect is in progress */ |
247 | |
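/* On big-endian CPUs the NPE byte order matches the CPU, so sk_buffs are
 * DMA-mapped directly; on little-endian CPUs every frame is bounced through
 * a plain kmalloc()ed buffer and byte-swapped, hence the two buffer_t
 * flavours below.
 */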
248 | #ifdef __ARMEB__ |
249 | typedef struct sk_buff buffer_t; |
250 | #define free_buffer dev_kfree_skb |
251 | #define free_buffer_irq dev_consume_skb_irq |
252 | #else |
253 | typedef void buffer_t; |
254 | #define free_buffer kfree |
255 | #define free_buffer_irq kfree |
256 | #endif |
257 | |
258 | struct port { |
259 | struct device *dev; |
260 | struct npe *npe; |
261 | unsigned int txreadyq; |
262 | unsigned int rxtrigq; |
263 | unsigned int rxfreeq; |
264 | unsigned int rxq; |
265 | unsigned int txq; |
266 | unsigned int txdoneq; |
267 | struct gpio_desc *cts; |
268 | struct gpio_desc *rts; |
269 | struct gpio_desc *dcd; |
270 | struct gpio_desc *dtr; |
271 | struct gpio_desc *clk_internal; |
272 | struct net_device *netdev; |
273 | struct napi_struct napi; |
274 | buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; |
275 | struct desc *desc_tab; /* coherent */ |
276 | dma_addr_t desc_tab_phys; |
277 | unsigned int id; |
278 | unsigned int clock_type, clock_rate, loopback; |
279 | unsigned int initialized, carrier; |
280 | u8 hdlc_cfg; |
281 | u32 clock_reg; |
282 | }; |
283 | |
284 | /* NPE message structure */ |
285 | struct msg { |
286 | #ifdef __ARMEB__ |
287 | u8 cmd, unused, hss_port, index; |
288 | union { |
289 | struct { u8 data8a, data8b, data8c, data8d; }; |
290 | struct { u16 data16a, data16b; }; |
291 | struct { u32 data32; }; |
292 | }; |
293 | #else |
294 | u8 index, hss_port, unused, cmd; |
295 | union { |
296 | struct { u8 data8d, data8c, data8b, data8a; }; |
297 | struct { u16 data16b, data16a; }; |
298 | struct { u32 data32; }; |
299 | }; |
300 | #endif |
301 | }; |
302 | |
303 | /* HDLC packet descriptor */ |
304 | struct desc { |
305 | u32 next; /* pointer to next buffer, unused */ |
306 | |
307 | #ifdef __ARMEB__ |
308 | u16 buf_len; /* buffer length */ |
309 | u16 pkt_len; /* packet length */ |
310 | u32 data; /* pointer to data buffer in RAM */ |
311 | u8 status; |
312 | u8 error_count; |
313 | u16 __reserved; |
314 | #else |
315 | u16 pkt_len; /* packet length */ |
316 | u16 buf_len; /* buffer length */ |
317 | u32 data; /* pointer to data buffer in RAM */ |
318 | u16 __reserved; |
319 | u8 error_count; |
320 | u8 status; |
321 | #endif |
322 | u32 __reserved1[4]; |
323 | }; |
324 | |
325 | #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
326 | (n) * sizeof(struct desc)) |
327 | #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) |
328 | |
329 | #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
330 | ((n) + RX_DESCS) * sizeof(struct desc)) |
331 | #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) |
332 | |
333 | /***************************************************************************** |
334 | * global variables |
335 | ****************************************************************************/ |
336 | |
337 | static int ports_open; |
338 | static struct dma_pool *dma_pool; |
339 | static DEFINE_SPINLOCK(npe_lock); |
340 | |
341 | /***************************************************************************** |
342 | * utility functions |
343 | ****************************************************************************/ |
344 | |
345 | static inline struct port *dev_to_port(struct net_device *dev) |
346 | { |
347 | return dev_to_hdlc(dev)->priv; |
348 | } |
349 | |
350 | #ifndef __ARMEB__ |
351 | static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) |
352 | { |
353 | int i; |
354 | |
355 | for (i = 0; i < cnt; i++) |
356 | dest[i] = swab32(src[i]); |
357 | } |
358 | #endif |
359 | |
360 | /***************************************************************************** |
361 | * HSS access |
362 | ****************************************************************************/ |
363 | |
364 | static void hss_npe_send(struct port *port, struct msg *msg, const char *what) |
365 | { |
366 | u32 *val = (u32 *)msg; |
367 | |
368 | if (npe_send_message(port->npe, msg, what)) { |
369 | pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n", |
370 | port->id, val[0], val[1], npe_name(port->npe)); |
371 | BUG(); |
372 | } |
373 | } |
374 | |
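/* Program both look-up tables so that every TDM channel is mapped to the
 * packetized HDLC service: two bits per channel, one 32-bit LUT word flushed
 * to the NPE for every 16 channels.
 */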
375 | static void hss_config_set_lut(struct port *port) |
376 | { |
377 | struct msg msg; |
378 | int ch; |
379 | |
380 | memset(&msg, 0, sizeof(msg)); |
381 | msg.cmd = PORT_CONFIG_WRITE; |
382 | msg.hss_port = port->id; |
383 | |
384 | for (ch = 0; ch < MAX_CHANNELS; ch++) { |
385 | msg.data32 >>= 2; |
386 | msg.data32 |= TDMMAP_HDLC << 30; |
387 | |
388 | if (ch % 16 == 15) { |
389 | msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3); |
390 | hss_npe_send(port, &msg, "HSS_SET_TX_LUT"); |
391 | |
392 | msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT; |
393 | hss_npe_send(port, &msg, "HSS_SET_RX_LUT"); |
394 | } |
395 | } |
396 | } |
397 | |
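/* Push the complete port configuration (PCRs, core and clock control, frame
 * size/offset and the channel LUTs) to the NPE, then issue PORT_CONFIG_LOAD
 * and wait for the NPE to acknowledge it.
 */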
398 | static void hss_config(struct port *port) |
399 | { |
400 | struct msg msg; |
401 | |
402 | memset(&msg, 0, sizeof(msg)); |
403 | msg.cmd = PORT_CONFIG_WRITE; |
404 | msg.hss_port = port->id; |
405 | msg.index = HSS_CONFIG_TX_PCR; |
406 | msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN | |
407 | PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT; |
408 | if (port->clock_type == CLOCK_INT) |
409 | msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT; |
410 | hss_npe_send(port, &msg, "HSS_SET_TX_PCR"); |
411 | |
412 | msg.index = HSS_CONFIG_RX_PCR; |
413 | msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING; |
414 | hss_npe_send(port, &msg, "HSS_SET_RX_PCR"); |
415 | |
416 | memset(&msg, 0, sizeof(msg)); |
417 | msg.cmd = PORT_CONFIG_WRITE; |
418 | msg.hss_port = port->id; |
419 | msg.index = HSS_CONFIG_CORE_CR; |
420 | msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) | |
421 | (port->id ? CCR_SECOND_HSS : 0); |
422 | hss_npe_send(port, &msg, "HSS_SET_CORE_CR"); |
423 | |
424 | memset(&msg, 0, sizeof(msg)); |
425 | msg.cmd = PORT_CONFIG_WRITE; |
426 | msg.hss_port = port->id; |
427 | msg.index = HSS_CONFIG_CLOCK_CR; |
428 | msg.data32 = port->clock_reg; |
429 | hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR"); |
430 | |
431 | memset(&msg, 0, sizeof(msg)); |
432 | msg.cmd = PORT_CONFIG_WRITE; |
433 | msg.hss_port = port->id; |
434 | msg.index = HSS_CONFIG_TX_FCR; |
435 | msg.data16a = FRAME_OFFSET; |
436 | msg.data16b = FRAME_SIZE - 1; |
437 | hss_npe_send(port, &msg, "HSS_SET_TX_FCR"); |
438 | |
439 | memset(&msg, 0, sizeof(msg)); |
440 | msg.cmd = PORT_CONFIG_WRITE; |
441 | msg.hss_port = port->id; |
442 | msg.index = HSS_CONFIG_RX_FCR; |
443 | msg.data16a = FRAME_OFFSET; |
444 | msg.data16b = FRAME_SIZE - 1; |
445 | hss_npe_send(port, &msg, "HSS_SET_RX_FCR"); |
446 | |
447 | hss_config_set_lut(port); |
448 | |
449 | memset(&msg, 0, sizeof(msg)); |
450 | msg.cmd = PORT_CONFIG_LOAD; |
451 | msg.hss_port = port->id; |
452 | hss_npe_send(port, &msg, "HSS_LOAD_CONFIG"); |
453 | |
454 | if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") || |
455 | /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */ |
456 | msg.cmd != PORT_CONFIG_LOAD || msg.data32) { |
457 | pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id); |
458 | BUG(); |
459 | } |
460 | |
461 | /* HDLC may stop working without this - check FIXME */ |
462 | npe_recv_message(port->npe, &msg, "FLUSH_IT"); |
463 | } |
464 | |
465 | static void hss_set_hdlc_cfg(struct port *port) |
466 | { |
467 | struct msg msg; |
468 | |
469 | memset(&msg, 0, sizeof(msg)); |
470 | msg.cmd = PKT_PIPE_HDLC_CFG_WRITE; |
471 | msg.hss_port = port->id; |
472 | msg.data8a = port->hdlc_cfg; /* rx_cfg */ |
473 | msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */ |
474 | hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG"); |
475 | } |
476 | |
477 | static u32 hss_get_status(struct port *port) |
478 | { |
479 | struct msg msg; |
480 | |
481 | memset(&msg, 0, sizeof(msg)); |
482 | msg.cmd = PORT_ERROR_READ; |
483 | msg.hss_port = port->id; |
484 | hss_npe_send(port, &msg, "PORT_ERROR_READ"); |
485 | if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) { |
486 | pr_crit("HSS-%i: unable to read HSS status\n", port->id); |
487 | BUG(); |
488 | } |
489 | |
490 | return msg.data32; |
491 | } |
492 | |
493 | static void hss_start_hdlc(struct port *port) |
494 | { |
495 | struct msg msg; |
496 | |
497 | memset(&msg, 0, sizeof(msg)); |
498 | msg.cmd = PKT_PIPE_FLOW_ENABLE; |
499 | msg.hss_port = port->id; |
500 | msg.data32 = 0; |
501 | hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE"); |
502 | } |
503 | |
504 | static void hss_stop_hdlc(struct port *port) |
505 | { |
506 | struct msg msg; |
507 | |
508 | memset(&msg, 0, sizeof(msg)); |
509 | msg.cmd = PKT_PIPE_FLOW_DISABLE; |
510 | msg.hss_port = port->id; |
511 | hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE"); |
512 | hss_get_status(port); /* make sure it's halted */ |
513 | } |
514 | |
515 | static int hss_load_firmware(struct port *port) |
516 | { |
517 | struct msg msg; |
518 | int err; |
519 | |
520 | if (port->initialized) |
521 | return 0; |
522 | |
523 | if (!npe_running(port->npe)) { |
524 | err = npe_load_firmware(port->npe, npe_name(port->npe), |
525 | port->dev); |
526 | if (err) |
527 | return err; |
528 | } |
529 | |
530 | /* HDLC mode configuration */ |
531 | memset(&msg, 0, sizeof(msg)); |
532 | msg.cmd = PKT_NUM_PIPES_WRITE; |
533 | msg.hss_port = port->id; |
534 | msg.data8a = PKT_NUM_PIPES; |
535 | hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES"); |
536 | |
537 | msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE; |
538 | msg.data8a = PKT_PIPE_FIFO_SIZEW; |
539 | hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO"); |
540 | |
541 | msg.cmd = PKT_PIPE_MODE_WRITE; |
542 | msg.data8a = NPE_PKT_MODE_HDLC; |
543 | /* msg.data8b = inv_mask */ |
544 | /* msg.data8c = or_mask */ |
545 | hss_npe_send(port, &msg, "HSS_SET_PKT_MODE"); |
546 | |
547 | msg.cmd = PKT_PIPE_RX_SIZE_WRITE; |
548 | msg.data16a = HDLC_MAX_MRU; /* including CRC */ |
549 | hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE"); |
550 | |
551 | msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE; |
552 | msg.data32 = 0x7F7F7F7F; /* ??? FIXME */ |
553 | hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE"); |
554 | |
555 | port->initialized = 1; |
556 | return 0; |
557 | } |
558 | |
559 | /***************************************************************************** |
560 | * packetized (HDLC) operation |
561 | ****************************************************************************/ |
562 | |
563 | static inline void debug_pkt(struct net_device *dev, const char *func, |
564 | u8 *data, int len) |
565 | { |
566 | #if DEBUG_PKT_BYTES |
567 | int i; |
568 | |
569 | printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len); |
570 | for (i = 0; i < len; i++) { |
571 | if (i >= DEBUG_PKT_BYTES) |
572 | break; |
573 | printk("%s%02X", !(i % 4) ? " " : "", data[i]); |
574 | } |
575 | printk("\n"); |
576 | #endif |
577 | } |
578 | |
579 | static inline void debug_desc(u32 phys, struct desc *desc) |
580 | { |
581 | #if DEBUG_DESC |
582 | printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n", |
583 | phys, desc->next, desc->buf_len, desc->pkt_len, |
584 | desc->data, desc->status, desc->error_count); |
585 | #endif |
586 | } |
587 | |
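/* Pop a descriptor's physical address from a hardware queue and convert it
 * back to an index into the port's RX or TX descriptor table; returns -1 if
 * the queue is empty.
 */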
588 | static inline int queue_get_desc(unsigned int queue, struct port *port, |
589 | int is_tx) |
590 | { |
591 | u32 phys, tab_phys, n_desc; |
592 | struct desc *tab; |
593 | |
594 | phys = qmgr_get_entry(queue); |
595 | if (!phys) |
596 | return -1; |
597 | |
598 | BUG_ON(phys & 0x1F); |
599 | tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); |
600 | tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); |
601 | n_desc = (phys - tab_phys) / sizeof(struct desc); |
602 | BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS)); |
603 | debug_desc(phys, &tab[n_desc]); |
604 | BUG_ON(tab[n_desc].next); |
605 | return n_desc; |
606 | } |
607 | |
608 | static inline void queue_put_desc(unsigned int queue, u32 phys, |
609 | struct desc *desc) |
610 | { |
611 | debug_desc(phys, desc); |
612 | BUG_ON(phys & 0x1F); |
613 | qmgr_put_entry(queue, phys); |
614 | /* Don't check for queue overflow here, we've allocated sufficient |
615 | * length and queues >= 32 don't support this check anyway. |
616 | */ |
617 | } |
618 | |
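/* On little-endian CPUs the TX data was copied into a word-aligned bounce
 * buffer, so the unmap has to undo the same rounding that hss_hdlc_xmit()
 * applied when mapping it.
 */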
619 | static inline void dma_unmap_tx(struct port *port, struct desc *desc) |
620 | { |
621 | #ifdef __ARMEB__ |
622 | dma_unmap_single(&port->netdev->dev, desc->data, |
623 | desc->buf_len, DMA_TO_DEVICE); |
624 | #else |
625 | dma_unmap_single(&port->netdev->dev, desc->data & ~3, |
626 | ALIGN((desc->data & 3) + desc->buf_len, 4), |
627 | DMA_TO_DEVICE); |
628 | #endif |
629 | } |
630 | |
631 | static void hss_hdlc_set_carrier(void *pdev, int carrier) |
632 | { |
633 | struct net_device *netdev = pdev; |
634 | struct port *port = dev_to_port(netdev); |
635 | unsigned long flags; |
636 | |
637 | spin_lock_irqsave(&npe_lock, flags); |
638 | port->carrier = carrier; |
639 | if (!port->loopback) { |
640 | if (carrier) |
641 | netif_carrier_on(netdev); |
642 | else |
643 | netif_carrier_off(netdev); |
644 | } |
645 | spin_unlock_irqrestore(&npe_lock, flags); |
646 | } |
647 | |
648 | static void hss_hdlc_rx_irq(void *pdev) |
649 | { |
650 | struct net_device *dev = pdev; |
651 | struct port *port = dev_to_port(dev); |
652 | |
653 | #if DEBUG_RX |
654 | printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); |
655 | #endif |
656 | qmgr_disable_irq(port->rxq); |
657 | napi_schedule(&port->napi); |
658 | } |
659 | |
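/* NAPI poll handler: drain up to 'budget' frames from the RX queue, put
 * fresh buffers back on the RX-free queue and re-enable the queue IRQ once
 * the RX queue is empty.
 */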
660 | static int hss_hdlc_poll(struct napi_struct *napi, int budget) |
661 | { |
662 | struct port *port = container_of(napi, struct port, napi); |
663 | struct net_device *dev = port->netdev; |
664 | unsigned int rxq = port->rxq; |
665 | unsigned int rxfreeq = port->rxfreeq; |
666 | int received = 0; |
667 | |
668 | #if DEBUG_RX |
669 | printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name); |
670 | #endif |
671 | |
672 | while (received < budget) { |
673 | struct sk_buff *skb; |
674 | struct desc *desc; |
675 | int n; |
676 | #ifdef __ARMEB__ |
677 | struct sk_buff *temp; |
678 | u32 phys; |
679 | #endif |
680 | |
681 | n = queue_get_desc(rxq, port, 0); |
682 | if (n < 0) { |
683 | #if DEBUG_RX |
684 | printk(KERN_DEBUG "%s: hss_hdlc_poll" |
685 | " napi_complete\n", dev->name); |
686 | #endif |
687 | napi_complete(napi); |
688 | qmgr_enable_irq(rxq); |
689 | if (!qmgr_stat_empty(rxq) && |
690 | napi_schedule(napi)) { |
691 | #if DEBUG_RX |
692 | printk(KERN_DEBUG "%s: hss_hdlc_poll" |
693 | " napi_schedule succeeded\n", |
694 | dev->name); |
695 | #endif |
696 | qmgr_disable_irq(rxq); |
697 | continue; |
698 | } |
699 | #if DEBUG_RX |
700 | printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n", |
701 | dev->name); |
702 | #endif |
703 | return received; /* all work done */ |
704 | } |
705 | |
706 | desc = rx_desc_ptr(port, n); |
707 | #if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */ |
708 | if (desc->error_count) |
709 | printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X" |
710 | " errors %u\n", dev->name, desc->status, |
711 | desc->error_count); |
712 | #endif |
713 | skb = NULL; |
714 | switch (desc->status) { |
715 | case 0: |
716 | #ifdef __ARMEB__ |
717 | skb = netdev_alloc_skb(dev, RX_SIZE); |
718 | if (skb) { |
719 | phys = dma_map_single(&dev->dev, skb->data, |
720 | RX_SIZE, |
721 | DMA_FROM_DEVICE); |
722 | if (dma_mapping_error(&dev->dev, phys)) { |
723 | dev_kfree_skb(skb); |
724 | skb = NULL; |
725 | } |
726 | } |
727 | #else |
728 | skb = netdev_alloc_skb(dev, desc->pkt_len); |
729 | #endif |
730 | if (!skb) |
731 | dev->stats.rx_dropped++; |
732 | break; |
733 | case ERR_HDLC_ALIGN: |
734 | case ERR_HDLC_ABORT: |
735 | dev->stats.rx_frame_errors++; |
736 | dev->stats.rx_errors++; |
737 | break; |
738 | case ERR_HDLC_FCS: |
739 | dev->stats.rx_crc_errors++; |
740 | dev->stats.rx_errors++; |
741 | break; |
742 | case ERR_HDLC_TOO_LONG: |
743 | dev->stats.rx_length_errors++; |
744 | dev->stats.rx_errors++; |
745 | break; |
746 | default: /* FIXME - remove printk */ |
747 | netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n", |
748 | desc->status, desc->error_count); |
749 | dev->stats.rx_errors++; |
750 | } |
751 | |
752 | if (!skb) { |
753 | /* put the desc back on RX-ready queue */ |
754 | desc->buf_len = RX_SIZE; |
755 | desc->pkt_len = desc->status = 0; |
756 | queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); |
757 | continue; |
758 | } |
759 | |
760 | /* process received frame */ |
761 | #ifdef __ARMEB__ |
762 | temp = skb; |
763 | skb = port->rx_buff_tab[n]; |
764 | dma_unmap_single(&dev->dev, desc->data, |
765 | RX_SIZE, DMA_FROM_DEVICE); |
766 | #else |
767 | dma_sync_single_for_cpu(&dev->dev, desc->data, |
768 | RX_SIZE, DMA_FROM_DEVICE); |
769 | memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], |
770 | ALIGN(desc->pkt_len, 4) / 4); |
771 | #endif |
772 | skb_put(skb, desc->pkt_len); |
773 | |
774 | debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len); |
775 | |
776 | skb->protocol = hdlc_type_trans(skb, dev); |
777 | dev->stats.rx_packets++; |
778 | dev->stats.rx_bytes += skb->len; |
779 | netif_receive_skb(skb); |
780 | |
781 | /* put the new buffer on RX-free queue */ |
782 | #ifdef __ARMEB__ |
783 | port->rx_buff_tab[n] = temp; |
784 | desc->data = phys; |
785 | #endif |
786 | desc->buf_len = RX_SIZE; |
787 | desc->pkt_len = 0; |
788 | queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); |
789 | received++; |
790 | } |
791 | #if DEBUG_RX |
792 | printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n"); |
793 | #endif |
794 | return received; /* not all work done */ |
795 | } |
796 | |
797 | static void hss_hdlc_txdone_irq(void *pdev) |
798 | { |
799 | struct net_device *dev = pdev; |
800 | struct port *port = dev_to_port(dev); |
801 | int n_desc; |
802 | |
803 | #if DEBUG_TX |
804 | printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n"); |
805 | #endif |
806 | while ((n_desc = queue_get_desc(port->txdoneq, |
807 | port, 1)) >= 0) { |
808 | struct desc *desc; |
809 | int start; |
810 | |
811 | desc = tx_desc_ptr(port, n_desc); |
812 | |
813 | dev->stats.tx_packets++; |
814 | dev->stats.tx_bytes += desc->pkt_len; |
815 | |
816 | dma_unmap_tx(port, desc); |
817 | #if DEBUG_TX |
818 | printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n", |
819 | dev->name, port->tx_buff_tab[n_desc]); |
820 | #endif |
821 | free_buffer_irq(port->tx_buff_tab[n_desc]); |
822 | port->tx_buff_tab[n_desc] = NULL; |
823 | |
824 | start = qmgr_stat_below_low_watermark(port->txreadyq); |
825 | queue_put_desc(port->txreadyq, |
826 | tx_desc_phys(port, n_desc), desc); |
827 | if (start) { /* TX-ready queue was empty */ |
828 | #if DEBUG_TX |
829 | printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" |
830 | " ready\n", dev->name); |
831 | #endif |
832 | netif_wake_queue(dev); |
833 | } |
834 | } |
835 | } |
836 | |
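/* Transmit path: on little-endian CPUs the frame is byte-swapped into a
 * word-aligned bounce buffer first, then a free TX descriptor is taken from
 * the TX-ready queue and handed to the NPE via the TX queue.
 */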
837 | static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) |
838 | { |
839 | struct port *port = dev_to_port(dev); |
840 | unsigned int txreadyq = port->txreadyq; |
841 | int len, offset, bytes, n; |
842 | void *mem; |
843 | u32 phys; |
844 | struct desc *desc; |
845 | |
846 | #if DEBUG_TX |
847 | printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name); |
848 | #endif |
849 | |
850 | if (unlikely(skb->len > HDLC_MAX_MRU)) { |
851 | dev_kfree_skb(skb); |
852 | dev->stats.tx_errors++; |
853 | return NETDEV_TX_OK; |
854 | } |
855 | |
856 | debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len); |
857 | |
858 | len = skb->len; |
859 | #ifdef __ARMEB__ |
860 | offset = 0; /* no need to keep alignment */ |
861 | bytes = len; |
862 | mem = skb->data; |
863 | #else |
864 | offset = (int)skb->data & 3; /* keep 32-bit alignment */ |
865 | bytes = ALIGN(offset + len, 4); |
866 | mem = kmalloc(bytes, GFP_ATOMIC); |
867 | if (!mem) { |
868 | dev_kfree_skb(skb); |
869 | dev->stats.tx_dropped++; |
870 | return NETDEV_TX_OK; |
871 | } |
872 | memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); |
873 | dev_kfree_skb(skb); |
874 | #endif |
875 | |
876 | phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); |
877 | if (dma_mapping_error(&dev->dev, phys)) { |
878 | #ifdef __ARMEB__ |
879 | dev_kfree_skb(skb); |
880 | #else |
881 | kfree(mem); |
882 | #endif |
883 | dev->stats.tx_dropped++; |
884 | return NETDEV_TX_OK; |
885 | } |
886 | |
887 | n = queue_get_desc(txreadyq, port, 1); |
888 | BUG_ON(n < 0); |
889 | desc = tx_desc_ptr(port, n); |
890 | |
891 | #ifdef __ARMEB__ |
892 | port->tx_buff_tab[n] = skb; |
893 | #else |
894 | port->tx_buff_tab[n] = mem; |
895 | #endif |
896 | desc->data = phys + offset; |
897 | desc->buf_len = desc->pkt_len = len; |
898 | |
899 | wmb(); |
900 | queue_put_desc(port->txq, tx_desc_phys(port, n), desc); |
901 | |
902 | if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ |
903 | #if DEBUG_TX |
904 | printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); |
905 | #endif |
906 | netif_stop_queue(dev); |
907 | /* we could miss TX ready interrupt */ |
908 | if (!qmgr_stat_below_low_watermark(txreadyq)) { |
909 | #if DEBUG_TX |
910 | printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", |
911 | dev->name); |
912 | #endif |
913 | netif_wake_queue(dev); |
914 | } |
915 | } |
916 | |
917 | #if DEBUG_TX |
918 | printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name); |
919 | #endif |
920 | return NETDEV_TX_OK; |
921 | } |
922 | |
923 | static int request_hdlc_queues(struct port *port) |
924 | { |
925 | int err; |
926 | |
927 | err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0, |
928 | "%s:RX-free", port->netdev->name); |
929 | if (err) |
930 | return err; |
931 | |
932 | err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0, |
933 | "%s:RX", port->netdev->name); |
934 | if (err) |
935 | goto rel_rxfree; |
936 | |
937 | err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0, |
938 | "%s:TX", port->netdev->name); |
939 | if (err) |
940 | goto rel_rx; |
941 | |
942 | err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0, |
943 | "%s:TX-ready", port->netdev->name); |
944 | if (err) |
945 | goto rel_tx; |
946 | |
947 | err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0, |
948 | "%s:TX-done", port->netdev->name); |
949 | if (err) |
950 | goto rel_txready; |
951 | return 0; |
952 | |
953 | rel_txready: |
954 | qmgr_release_queue(port->txreadyq); |
955 | rel_tx: |
956 | qmgr_release_queue(port->txq); |
957 | rel_rx: |
958 | qmgr_release_queue(port->rxq); |
959 | rel_rxfree: |
960 | qmgr_release_queue(port->rxfreeq); |
961 | printk(KERN_DEBUG "%s: unable to request hardware queues\n", |
962 | port->netdev->name); |
963 | return err; |
964 | } |
965 | |
966 | static void release_hdlc_queues(struct port *port) |
967 | { |
968 | qmgr_release_queue(port->rxfreeq); |
969 | qmgr_release_queue(port->rxq); |
970 | qmgr_release_queue(port->txdoneq); |
971 | qmgr_release_queue(port->txq); |
972 | qmgr_release_queue(port->txreadyq); |
973 | } |
974 | |
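/* Allocate the coherent descriptor table from the shared DMA pool and
 * pre-allocate/map one RX buffer per RX descriptor; TX buffers are attached
 * later, in hss_hdlc_xmit().
 */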
975 | static int init_hdlc_queues(struct port *port) |
976 | { |
977 | int i; |
978 | |
979 | if (!ports_open) { |
980 | dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, |
981 | POOL_ALLOC_SIZE, 32, 0); |
982 | if (!dma_pool) |
983 | return -ENOMEM; |
984 | } |
985 | |
986 | port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, |
987 | &port->desc_tab_phys); |
988 | if (!port->desc_tab) |
989 | return -ENOMEM; |
990 | memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ |
991 | memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); |
992 | |
993 | /* Setup RX buffers */ |
994 | for (i = 0; i < RX_DESCS; i++) { |
995 | struct desc *desc = rx_desc_ptr(port, i); |
996 | buffer_t *buff; |
997 | void *data; |
998 | #ifdef __ARMEB__ |
999 | buff = netdev_alloc_skb(port->netdev, RX_SIZE); |
1000 | if (!buff) |
1001 | return -ENOMEM; |
1002 | data = buff->data; |
1003 | #else |
1004 | buff = kmalloc(RX_SIZE, GFP_KERNEL); |
1005 | if (!buff) |
1006 | return -ENOMEM; |
1007 | data = buff; |
1008 | #endif |
1009 | desc->buf_len = RX_SIZE; |
1010 | desc->data = dma_map_single(&port->netdev->dev, data, |
1011 | RX_SIZE, DMA_FROM_DEVICE); |
1012 | if (dma_mapping_error(&port->netdev->dev, desc->data)) { |
1013 | free_buffer(buff); |
1014 | return -EIO; |
1015 | } |
1016 | port->rx_buff_tab[i] = buff; |
1017 | } |
1018 | |
1019 | return 0; |
1020 | } |
1021 | |
1022 | static void destroy_hdlc_queues(struct port *port) |
1023 | { |
1024 | int i; |
1025 | |
1026 | if (port->desc_tab) { |
1027 | for (i = 0; i < RX_DESCS; i++) { |
1028 | struct desc *desc = rx_desc_ptr(port, i); |
1029 | buffer_t *buff = port->rx_buff_tab[i]; |
1030 | |
1031 | if (buff) { |
1032 | dma_unmap_single(&port->netdev->dev, |
1033 | desc->data, RX_SIZE, |
1034 | DMA_FROM_DEVICE); |
1035 | free_buffer(buff); |
1036 | } |
1037 | } |
1038 | for (i = 0; i < TX_DESCS; i++) { |
1039 | struct desc *desc = tx_desc_ptr(port, i); |
1040 | buffer_t *buff = port->tx_buff_tab[i]; |
1041 | |
1042 | if (buff) { |
1043 | dma_unmap_tx(port, desc); |
1044 | free_buffer(buff); |
1045 | } |
1046 | } |
1047 | dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); |
1048 | port->desc_tab = NULL; |
1049 | } |
1050 | |
1051 | if (!ports_open && dma_pool) { |
1052 | dma_pool_destroy(dma_pool); |
1053 | dma_pool = NULL; |
1054 | } |
1055 | } |
1056 | |
1057 | static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data) |
1058 | { |
1059 | struct net_device *dev = data; |
1060 | struct port *port = dev_to_port(dev); |
1061 | int val; |
1062 | |
1063 | val = gpiod_get_value(port->dcd); |
1064 | hss_hdlc_set_carrier(dev, val); |
1065 | |
1066 | return IRQ_HANDLED; |
1067 | } |
1068 | |
1069 | static int hss_hdlc_open(struct net_device *dev) |
1070 | { |
1071 | struct port *port = dev_to_port(dev); |
1072 | unsigned long flags; |
1073 | int i, err = 0; |
1074 | int val; |
1075 | |
1076 | err = hdlc_open(dev); |
1077 | if (err) |
1078 | return err; |
1079 | |
1080 | err = hss_load_firmware(port); |
1081 | if (err) |
1082 | goto err_hdlc_close; |
1083 | |
1084 | err = request_hdlc_queues(port); |
1085 | if (err) |
1086 | goto err_hdlc_close; |
1087 | |
1088 | err = init_hdlc_queues(port); |
1089 | if (err) |
1090 | goto err_destroy_queues; |
1091 | |
1092 | spin_lock_irqsave(&npe_lock, flags); |
1093 | |
1094 | /* Set the carrier, the GPIO is flagged active low so this will return |
1095 | * 1 if DCD is asserted. |
1096 | */ |
1097 | val = gpiod_get_value(port->dcd); |
1098 | hss_hdlc_set_carrier(dev, val); |
1099 | |
1100 | /* Set up an IRQ for DCD */ |
1101 | err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev); |
1102 | if (err) { |
1103 | dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err); |
1104 | goto err_unlock; |
1105 | } |
1106 | |
1107 | /* GPIOs are flagged active low so this asserts DTR and RTS */ |
1108 | gpiod_set_value(port->dtr, 1); |
1109 | gpiod_set_value(port->rts, 1); |
1110 | |
1111 | spin_unlock_irqrestore(&npe_lock, flags); |
1112 | |
1113 | /* Populate queues with buffers, no failure after this point */ |
1114 | for (i = 0; i < TX_DESCS; i++) |
1115 | queue_put_desc(port->txreadyq, |
1116 | tx_desc_phys(port, i), tx_desc_ptr(port, i)); |
1117 | |
1118 | for (i = 0; i < RX_DESCS; i++) |
1119 | queue_put_desc(port->rxfreeq, |
1120 | rx_desc_phys(port, i), rx_desc_ptr(port, i)); |
1121 | |
1122 | napi_enable(&port->napi); |
1123 | netif_start_queue(dev); |
1124 | |
1125 | qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, |
1126 | hss_hdlc_rx_irq, dev); |
1127 | |
1128 | qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY, |
1129 | hss_hdlc_txdone_irq, dev); |
1130 | qmgr_enable_irq(port->txdoneq); |
1131 | |
1132 | ports_open++; |
1133 | |
1134 | hss_set_hdlc_cfg(port); |
1135 | hss_config(port); |
1136 | |
1137 | hss_start_hdlc(port); |
1138 | |
1139 | /* we may already have RX data, enables IRQ */ |
1140 | napi_schedule(&port->napi); |
1141 | return 0; |
1142 | |
1143 | err_unlock: |
1144 | spin_unlock_irqrestore(&npe_lock, flags); |
1145 | err_destroy_queues: |
1146 | destroy_hdlc_queues(port); |
1147 | release_hdlc_queues(port); |
1148 | err_hdlc_close: |
1149 | hdlc_close(dev); |
1150 | return err; |
1151 | } |
1152 | |
1153 | static int hss_hdlc_close(struct net_device *dev) |
1154 | { |
1155 | struct port *port = dev_to_port(dev); |
1156 | unsigned long flags; |
1157 | int i, buffs = RX_DESCS; /* allocated RX buffers */ |
1158 | |
1159 | spin_lock_irqsave(&npe_lock, flags); |
1160 | ports_open--; |
1161 | qmgr_disable_irq(port->rxq); |
1162 | netif_stop_queue(dev); |
1163 | napi_disable(&port->napi); |
1164 | |
1165 | hss_stop_hdlc(port); |
1166 | |
1167 | while (queue_get_desc(port->rxfreeq, port, 0) >= 0) |
1168 | buffs--; |
1169 | while (queue_get_desc(port->rxq, port, 0) >= 0) |
1170 | buffs--; |
1171 | |
1172 | if (buffs) |
1173 | netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n", |
1174 | buffs); |
1175 | |
1176 | buffs = TX_DESCS; |
1177 | while (queue_get_desc(port->txq, port, 1) >= 0) |
1178 | buffs--; /* cancel TX */ |
1179 | |
1180 | i = 0; |
1181 | do { |
1182 | while (queue_get_desc(port->txreadyq, port, 1) >= 0) |
1183 | buffs--; |
1184 | if (!buffs) |
1185 | break; |
1186 | } while (++i < MAX_CLOSE_WAIT); |
1187 | |
1188 | if (buffs) |
1189 | netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n", |
1190 | buffs); |
1191 | #if DEBUG_CLOSE |
1192 | if (!buffs) |
1193 | printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); |
1194 | #endif |
1195 | qmgr_disable_irq(port->txdoneq); |
1196 | |
1197 | free_irq(gpiod_to_irq(port->dcd), dev); |
1198 | /* GPIOs are flagged active low so this de-asserts DTR and RTS */ |
1199 | gpiod_set_value(port->dtr, 0); |
1200 | gpiod_set_value(port->rts, 0); |
1201 | spin_unlock_irqrestore(&npe_lock, flags); |
1202 | |
1203 | destroy_hdlc_queues(port); |
1204 | release_hdlc_queues(port); |
1205 | hdlc_close(dev); |
1206 | return 0; |
1207 | } |
1208 | |
1209 | static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding, |
1210 | unsigned short parity) |
1211 | { |
1212 | struct port *port = dev_to_port(dev); |
1213 | |
1214 | if (encoding != ENCODING_NRZ) |
1215 | return -EINVAL; |
1216 | |
1217 | switch (parity) { |
1218 | case PARITY_CRC16_PR1_CCITT: |
1219 | port->hdlc_cfg = 0; |
1220 | return 0; |
1221 | |
1222 | case PARITY_CRC32_PR1_CCITT: |
1223 | port->hdlc_cfg = PKT_HDLC_CRC_32; |
1224 | return 0; |
1225 | |
1226 | default: |
1227 | return -EINVAL; |
1228 | } |
1229 | } |
1230 | |
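/* Clock rate search helpers: check_clock() evaluates one (A, B, C) triple
 * against the requested rate using the formula documented above
 * (rate = timer_freq / (A + (B + 1) / (C + 1))), and find_best_clock() walks
 * the B/C space around the integer divider A = timer_freq / rate.
 */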
1231 | static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c, |
1232 | u32 *best, u32 *best_diff, u32 *reg) |
1233 | { |
1234 | /* a is 10-bit, b is 10-bit, c is 12-bit */ |
1235 | u64 new_rate; |
1236 | u32 new_diff; |
1237 | |
1238 | new_rate = timer_freq * (u64)(c + 1); |
1239 | do_div(new_rate, a * (c + 1) + b + 1); |
1240 | new_diff = abs((u32)new_rate - rate); |
1241 | |
1242 | if (new_diff < *best_diff) { |
1243 | *best = new_rate; |
1244 | *best_diff = new_diff; |
1245 | *reg = (a << 22) | (b << 12) | c; |
1246 | } |
1247 | return new_diff; |
1248 | } |
1249 | |
1250 | static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg) |
1251 | { |
1252 | u32 a, b, diff = 0xFFFFFFFF; |
1253 | |
1254 | a = timer_freq / rate; |
1255 | |
1256 | if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */ |
1257 | check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg); |
1258 | return; |
1259 | } |
1260 | if (a == 0) { /* > 66.666 MHz */ |
1261 | a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */ |
1262 | rate = timer_freq; |
1263 | } |
1264 | |
1265 | if (rate * a == timer_freq) { /* don't divide by 0 later */ |
1266 | check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg); |
1267 | return; |
1268 | } |
1269 | |
1270 | for (b = 0; b < 0x400; b++) { |
1271 | u64 c = (b + 1) * (u64)rate; |
1272 | |
1273 | do_div(c, timer_freq - rate * a); |
1274 | c--; |
1275 | if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */ |
1276 | if (b == 0 && /* also try a bit higher rate */ |
1277 | !check_clock(timer_freq, rate, a - 1, 1, 1, best, |
1278 | &diff, reg)) |
1279 | return; |
1280 | check_clock(timer_freq, rate, a, b, 0xFFF, best, |
1281 | &diff, reg); |
1282 | return; |
1283 | } |
1284 | if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg)) |
1285 | return; |
1286 | if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff, |
1287 | reg)) |
1288 | return; |
1289 | } |
1290 | } |
1291 | |
1292 | static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type) |
1293 | { |
1294 | switch (clock_type) { |
1295 | case CLOCK_DEFAULT: |
1296 | case CLOCK_EXT: |
1297 | gpiod_set_value(port->clk_internal, 0); |
1298 | return CLOCK_EXT; |
1299 | case CLOCK_INT: |
1300 | gpiod_set_value(port->clk_internal, 1); |
1301 | return CLOCK_INT; |
1302 | default: |
1303 | return -EINVAL; |
1304 | } |
1305 | } |
1306 | |
1307 | static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) |
1308 | { |
1309 | const size_t size = sizeof(sync_serial_settings); |
1310 | sync_serial_settings new_line; |
1311 | sync_serial_settings __user *line = ifs->ifs_ifsu.sync; |
1312 | struct port *port = dev_to_port(dev); |
1313 | unsigned long flags; |
1314 | int clk; |
1315 | |
1316 | switch (ifs->type) { |
1317 | case IF_GET_IFACE: |
1318 | ifs->type = IF_IFACE_V35; |
1319 | if (ifs->size < size) { |
1320 | ifs->size = size; /* data size wanted */ |
1321 | return -ENOBUFS; |
1322 | } |
1323 | memset(&new_line, 0, sizeof(new_line)); |
1324 | new_line.clock_type = port->clock_type; |
1325 | new_line.clock_rate = port->clock_rate; |
1326 | new_line.loopback = port->loopback; |
1327 | if (copy_to_user(line, &new_line, size)) |
1328 | return -EFAULT; |
1329 | return 0; |
1330 | |
1331 | case IF_IFACE_SYNC_SERIAL: |
1332 | case IF_IFACE_V35: |
1333 | if (!capable(CAP_NET_ADMIN)) |
1334 | return -EPERM; |
1335 | if (copy_from_user(&new_line, line, size)) |
1336 | return -EFAULT; |
1337 | |
1338 | clk = new_line.clock_type; |
1339 | hss_hdlc_set_clock(port, clk); |
1340 | |
1341 | if (clk != CLOCK_EXT && clk != CLOCK_INT) |
1342 | return -EINVAL; /* No such clock setting */ |
1343 | |
1344 | if (new_line.loopback != 0 && new_line.loopback != 1) |
1345 | return -EINVAL; |
1346 | |
1347 | port->clock_type = clk; /* Update settings */ |
1348 | if (clk == CLOCK_INT) { |
1349 | find_best_clock(IXP4XX_TIMER_FREQ, |
1350 | new_line.clock_rate, |
1351 | &port->clock_rate, &port->clock_reg); |
1352 | } else { |
1353 | port->clock_rate = 0; |
1354 | port->clock_reg = CLK42X_SPEED_2048KHZ; |
1355 | } |
1356 | port->loopback = new_line.loopback; |
1357 | |
1358 | spin_lock_irqsave(&npe_lock, flags); |
1359 | |
1360 | if (dev->flags & IFF_UP) |
1361 | hss_config(port); |
1362 | |
1363 | if (port->loopback || port->carrier) |
1364 | netif_carrier_on(port->netdev); |
1365 | else |
1366 | netif_carrier_off(port->netdev); |
1367 | spin_unlock_irqrestore(&npe_lock, flags); |
1368 | |
1369 | return 0; |
1370 | |
1371 | default: |
1372 | return hdlc_ioctl(dev, ifs); |
1373 | } |
1374 | } |
1375 | |
1376 | /***************************************************************************** |
1377 | * initialization |
1378 | ****************************************************************************/ |
1379 | |
1380 | static const struct net_device_ops hss_hdlc_ops = { |
1381 | .ndo_open = hss_hdlc_open, |
1382 | .ndo_stop = hss_hdlc_close, |
1383 | .ndo_start_xmit = hdlc_start_xmit, |
1384 | .ndo_siocwandev = hss_hdlc_ioctl, |
1385 | }; |
1386 | |
1387 | static int ixp4xx_hss_probe(struct platform_device *pdev) |
1388 | { |
1389 | struct of_phandle_args queue_spec; |
1390 | struct of_phandle_args npe_spec; |
1391 | struct device *dev = &pdev->dev; |
1392 | struct net_device *ndev; |
1393 | struct device_node *np; |
1394 | struct regmap *rmap; |
1395 | struct port *port; |
1396 | hdlc_device *hdlc; |
1397 | int err; |
1398 | u32 val; |
1399 | |
1400 | /* |
1401 | * Go into the syscon and check if we have the HSS and HDLC |
1402 | * features available, else this will not work. |
1403 | */ |
1404 | rmap = syscon_regmap_lookup_by_compatible("syscon"); |
1405 | if (IS_ERR(rmap)) |
1406 | return dev_err_probe(dev, PTR_ERR(rmap), |
1407 | "failed to look up syscon\n"); |
1408 | |
1409 | val = cpu_ixp4xx_features(rmap); |
1410 | |
1411 | if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) != |
1412 | (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) { |
1413 | dev_err(dev, "HDLC and HSS feature unavailable in platform\n"); |
1414 | return -ENODEV; |
1415 | } |
1416 | |
1417 | np = dev->of_node; |
1418 | |
1419 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); |
1420 | if (!port) |
1421 | return -ENOMEM; |
1422 | |
1423 | err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0, |
1424 | &npe_spec); |
1425 | if (err) |
1426 | return dev_err_probe(dev, err, "no NPE engine specified\n"); |
1427 | /* NPE ID 0x00, 0x10, 0x20... */ |
1428 | port->npe = npe_request(npe_spec.args[0] << 4); |
1429 | if (!port->npe) { |
1430 | dev_err(dev, "unable to obtain NPE instance\n"); |
1431 | return -ENODEV; |
1432 | } |
1433 | |
1434 | /* Get the TX ready queue as resource from queue manager */ |
1435 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-txready", 1, 0, |
1436 | &queue_spec); |
1437 | if (err) |
1438 | return dev_err_probe(dev, err, "no txready queue phandle\n"); |
1439 | port->txreadyq = queue_spec.args[0]; |
1440 | /* Get the RX trig queue as resource from queue manager */ |
1441 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-rxtrig", 1, 0, |
1442 | &queue_spec); |
1443 | if (err) |
1444 | return dev_err_probe(dev, err, "no rxtrig queue phandle\n"); |
1445 | port->rxtrigq = queue_spec.args[0]; |
1446 | /* Get the RX queue as resource from queue manager */ |
1447 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rx", 1, 0, |
1448 | &queue_spec); |
1449 | if (err) |
1450 | return dev_err_probe(dev, err, "no RX queue phandle\n"); |
1451 | port->rxq = queue_spec.args[0]; |
1452 | /* Get the TX queue as resource from queue manager */ |
1453 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-tx", 1, 0, |
1454 | &queue_spec); |
1455 | if (err) |
1456 | return dev_err_probe(dev, err, "no TX queue phandle\n"); |
1457 | port->txq = queue_spec.args[0]; |
1458 | /* Get the RX free queue as resource from queue manager */ |
1459 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rxfree", 1, 0, |
1460 | &queue_spec); |
1461 | if (err) |
1462 | return dev_err_probe(dev, err, "no RX free queue phandle\n"); |
1463 | port->rxfreeq = queue_spec.args[0]; |
1464 | /* Get the TX done queue as resource from queue manager */ |
1465 | err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-txdone", 1, 0, |
1466 | &queue_spec); |
1467 | if (err) |
1468 | return dev_err_probe(dev, err, "no TX done queue phandle\n"); |
1469 | port->txdoneq = queue_spec.args[0]; |
1470 | |
1471 | /* Obtain all the line control GPIOs */ |
1472 | port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW); |
1473 | if (IS_ERR(port->cts)) |
1474 | return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n"); |
1475 | port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW); |
1476 | if (IS_ERR(port->rts)) |
1477 | return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n"); |
1478 | port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN); |
1479 | if (IS_ERR(port->dcd)) |
1480 | return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n"); |
1481 | port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW); |
1482 | if (IS_ERR(port->dtr)) |
1483 | return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n"); |
1484 | port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW); |
1485 | if (IS_ERR(port->clk_internal)) |
1486 | return dev_err_probe(dev, PTR_ERR(port->clk_internal), |
1487 | "unable to get CLK internal GPIO\n"); |
1488 | |
1489 | ndev = alloc_hdlcdev(port); |
1490 | port->netdev = ndev; |
1491 | if (!port->netdev) { |
1492 | err = -ENOMEM; |
1493 | goto err_plat; |
1494 | } |
1495 | |
1496 | SET_NETDEV_DEV(ndev, &pdev->dev); |
1497 | hdlc = dev_to_hdlc(ndev); |
1498 | hdlc->attach = hss_hdlc_attach; |
1499 | hdlc->xmit = hss_hdlc_xmit; |
1500 | ndev->netdev_ops = &hss_hdlc_ops; |
1501 | ndev->tx_queue_len = 100; |
1502 | port->clock_type = CLOCK_EXT; |
1503 | port->clock_rate = 0; |
1504 | port->clock_reg = CLK42X_SPEED_2048KHZ; |
1505 | port->id = pdev->id; |
1506 | port->dev = &pdev->dev; |
1507 | netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); |
1508 | |
1509 | err = register_hdlc_device(ndev); |
1510 | if (err) |
1511 | goto err_free_netdev; |
1512 | |
1513 | platform_set_drvdata(pdev, port); |
1514 | |
1515 | netdev_info(ndev, "initialized\n"); |
1516 | return 0; |
1517 | |
1518 | err_free_netdev: |
1519 | free_netdev(ndev); |
1520 | err_plat: |
1521 | npe_release(port->npe); |
1522 | return err; |
1523 | } |
1524 | |
1525 | static int ixp4xx_hss_remove(struct platform_device *pdev) |
1526 | { |
1527 | struct port *port = platform_get_drvdata(pdev); |
1528 | |
1529 | unregister_hdlc_device(port->netdev); |
1530 | free_netdev(port->netdev); |
1531 | npe_release(port->npe); |
1532 | return 0; |
1533 | } |
1534 | |
1535 | static struct platform_driver ixp4xx_hss_driver = { |
1536 | .driver.name = DRV_NAME, |
1537 | .probe = ixp4xx_hss_probe, |
1538 | .remove = ixp4xx_hss_remove, |
1539 | }; |
1540 | module_platform_driver(ixp4xx_hss_driver); |
1541 | |
1542 | MODULE_AUTHOR("Krzysztof Halasa"); |
1543 | MODULE_DESCRIPTION("Intel IXP4xx HSS driver"); |
1544 | MODULE_LICENSE("GPL v2"); |
1545 | MODULE_ALIAS("platform:ixp4xx_hss"); |
1546 | |