1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports |
4 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> |
5 | * |
6 | * Based on the 64360 driver from: |
7 | * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> |
8 | * Rabeeh Khoury <rabeeh@marvell.com> |
9 | * |
10 | * Copyright (C) 2003 PMC-Sierra, Inc., |
11 | * written by Manish Lachwani |
12 | * |
13 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> |
14 | * |
15 | * Copyright (C) 2004-2006 MontaVista Software, Inc. |
16 | * Dale Farnsworth <dale@farnsworth.org> |
17 | * |
18 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
19 | * <sjhill@realitydiluted.com> |
20 | * |
21 | * Copyright (C) 2007-2008 Marvell Semiconductor |
22 | * Lennert Buytenhek <buytenh@marvell.com> |
23 | * |
24 | * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de> |
25 | */ |
26 | |
27 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
28 | |
29 | #include <linux/init.h> |
30 | #include <linux/dma-mapping.h> |
31 | #include <linux/in.h> |
32 | #include <linux/ip.h> |
33 | #include <net/tso.h> |
34 | #include <linux/tcp.h> |
35 | #include <linux/udp.h> |
36 | #include <linux/etherdevice.h> |
37 | #include <linux/delay.h> |
38 | #include <linux/ethtool.h> |
39 | #include <linux/platform_device.h> |
40 | #include <linux/module.h> |
41 | #include <linux/kernel.h> |
42 | #include <linux/spinlock.h> |
43 | #include <linux/workqueue.h> |
44 | #include <linux/phy.h> |
45 | #include <linux/mv643xx_eth.h> |
46 | #include <linux/io.h> |
47 | #include <linux/interrupt.h> |
48 | #include <linux/types.h> |
49 | #include <linux/slab.h> |
50 | #include <linux/clk.h> |
51 | #include <linux/of.h> |
52 | #include <linux/of_irq.h> |
53 | #include <linux/of_net.h> |
54 | #include <linux/of_mdio.h> |
55 | |
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
58 | |
59 | |
60 | /* |
61 | * Registers shared between all ports. |
62 | */ |
63 | #define PHY_ADDR 0x0000 |
64 | #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) |
65 | #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) |
66 | #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) |
67 | #define WINDOW_BAR_ENABLE 0x0290 |
68 | #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) |
69 | |
70 | /* |
71 | * Main per-port registers. These live at offset 0x0400 for |
72 | * port #0, 0x0800 for port #1, and 0x0c00 for port #2. |
73 | */ |
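/* Equivalently (a sketch, not a macro defined in this file): the per-port
 * base works out to 0x0400 + (port << 10), i.e. 0x0400, 0x0800 and 0x0c00
 * for ports 0, 1 and 2.
 */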
74 | #define PORT_CONFIG 0x0000 |
75 | #define UNICAST_PROMISCUOUS_MODE 0x00000001 |
76 | #define PORT_CONFIG_EXT 0x0004 |
77 | #define MAC_ADDR_LOW 0x0014 |
78 | #define MAC_ADDR_HIGH 0x0018 |
79 | #define SDMA_CONFIG 0x001c |
80 | #define TX_BURST_SIZE_16_64BIT 0x01000000 |
81 | #define TX_BURST_SIZE_4_64BIT 0x00800000 |
82 | #define BLM_TX_NO_SWAP 0x00000020 |
83 | #define BLM_RX_NO_SWAP 0x00000010 |
84 | #define RX_BURST_SIZE_16_64BIT 0x00000008 |
85 | #define RX_BURST_SIZE_4_64BIT 0x00000004 |
86 | #define PORT_SERIAL_CONTROL 0x003c |
87 | #define SET_MII_SPEED_TO_100 0x01000000 |
88 | #define SET_GMII_SPEED_TO_1000 0x00800000 |
89 | #define SET_FULL_DUPLEX_MODE 0x00200000 |
90 | #define MAX_RX_PACKET_9700BYTE 0x000a0000 |
91 | #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 |
92 | #define DO_NOT_FORCE_LINK_FAIL 0x00000400 |
93 | #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 |
94 | #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 |
95 | #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 |
96 | #define FORCE_LINK_PASS 0x00000002 |
97 | #define SERIAL_PORT_ENABLE 0x00000001 |
98 | #define PORT_STATUS 0x0044 |
99 | #define TX_FIFO_EMPTY 0x00000400 |
100 | #define TX_IN_PROGRESS 0x00000080 |
101 | #define PORT_SPEED_MASK 0x00000030 |
102 | #define PORT_SPEED_1000 0x00000010 |
103 | #define PORT_SPEED_100 0x00000020 |
104 | #define PORT_SPEED_10 0x00000000 |
105 | #define FLOW_CONTROL_ENABLED 0x00000008 |
106 | #define FULL_DUPLEX 0x00000004 |
107 | #define LINK_UP 0x00000002 |
108 | #define TXQ_COMMAND 0x0048 |
109 | #define TXQ_FIX_PRIO_CONF 0x004c |
110 | #define PORT_SERIAL_CONTROL1 0x004c |
111 | #define RGMII_EN 0x00000008 |
112 | #define CLK125_BYPASS_EN 0x00000010 |
113 | #define TX_BW_RATE 0x0050 |
114 | #define TX_BW_MTU 0x0058 |
115 | #define TX_BW_BURST 0x005c |
116 | #define INT_CAUSE 0x0060 |
117 | #define INT_TX_END 0x07f80000 |
118 | #define INT_TX_END_0 0x00080000 |
119 | #define INT_RX 0x000003fc |
120 | #define INT_RX_0 0x00000004 |
121 | #define INT_EXT 0x00000002 |
122 | #define INT_CAUSE_EXT 0x0064 |
123 | #define INT_EXT_LINK_PHY 0x00110000 |
124 | #define INT_EXT_TX 0x000000ff |
125 | #define INT_MASK 0x0068 |
126 | #define INT_MASK_EXT 0x006c |
127 | #define TX_FIFO_URGENT_THRESHOLD 0x0074 |
128 | #define RX_DISCARD_FRAME_CNT 0x0084 |
129 | #define RX_OVERRUN_FRAME_CNT 0x0088 |
130 | #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc |
131 | #define TX_BW_RATE_MOVED 0x00e0 |
132 | #define TX_BW_MTU_MOVED 0x00e8 |
133 | #define TX_BW_BURST_MOVED 0x00ec |
134 | #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) |
135 | #define RXQ_COMMAND 0x0280 |
136 | #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) |
137 | #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) |
138 | #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) |
139 | #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) |
140 | |
141 | /* |
142 | * Misc per-port registers. |
143 | */ |
144 | #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) |
145 | #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) |
146 | #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) |
147 | #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) |
148 | |
149 | |
150 | /* |
151 | * SDMA configuration register default value. |
152 | */ |
153 | #if defined(__BIG_ENDIAN) |
154 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ |
155 | (RX_BURST_SIZE_4_64BIT | \ |
156 | TX_BURST_SIZE_4_64BIT) |
157 | #elif defined(__LITTLE_ENDIAN) |
158 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ |
159 | (RX_BURST_SIZE_4_64BIT | \ |
160 | BLM_RX_NO_SWAP | \ |
161 | BLM_TX_NO_SWAP | \ |
162 | TX_BURST_SIZE_4_64BIT) |
163 | #else |
164 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined |
165 | #endif |
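/* On little-endian hosts the BLM_*_NO_SWAP bits are set to disable the
 * SDMA engine's DMA byte swapping; the big-endian configuration leaves
 * the swapping enabled instead.
 */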
166 | |
167 | |
168 | /* |
169 | * Misc definitions. |
170 | */ |
171 | #define DEFAULT_RX_QUEUE_SIZE 128 |
172 | #define DEFAULT_TX_QUEUE_SIZE 512 |
173 | #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) |
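/* Illustrative arithmetic (the actual values depend on the build
 * configuration): with PAGE_SIZE = 4096, NET_SKB_PAD = 32 and
 * SMP_CACHE_BYTES = 64, SKB_DMA_REALIGN = (4096 - 32) % 64 = 32 bytes
 * of extra headroom reserved in rxq_refill().
 */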
174 | |
175 | /* Max number of allowed TCP segments for software TSO */ |
176 | #define MV643XX_MAX_TSO_SEGS 100 |
177 | #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) |
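/* The factor of two above reflects that each TSO segment consumes one
 * header descriptor plus at least one data descriptor.
 */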
178 | |
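/* True iff 'addr' points into this queue's dedicated TSO header area;
 * such buffers must not be DMA-unmapped on reclaim (see txq_reclaim()).
 */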
#define IS_TSO_HEADER(txq, addr) \
180 | ((addr >= txq->tso_hdrs_dma) && \ |
181 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) |
182 | |
183 | #define DESC_DMA_MAP_SINGLE 0 |
184 | #define DESC_DMA_MAP_PAGE 1 |
185 | |
186 | /* |
187 | * RX/TX descriptors. |
188 | */ |
189 | #if defined(__BIG_ENDIAN) |
190 | struct rx_desc { |
191 | u16 byte_cnt; /* Descriptor buffer byte count */ |
192 | u16 buf_size; /* Buffer size */ |
193 | u32 cmd_sts; /* Descriptor command status */ |
194 | u32 next_desc_ptr; /* Next descriptor pointer */ |
195 | u32 buf_ptr; /* Descriptor buffer pointer */ |
196 | }; |
197 | |
198 | struct tx_desc { |
199 | u16 byte_cnt; /* buffer byte count */ |
200 | u16 l4i_chk; /* CPU provided TCP checksum */ |
201 | u32 cmd_sts; /* Command/status field */ |
202 | u32 next_desc_ptr; /* Pointer to next descriptor */ |
203 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ |
204 | }; |
205 | #elif defined(__LITTLE_ENDIAN) |
206 | struct rx_desc { |
207 | u32 cmd_sts; /* Descriptor command status */ |
208 | u16 buf_size; /* Buffer size */ |
209 | u16 byte_cnt; /* Descriptor buffer byte count */ |
210 | u32 buf_ptr; /* Descriptor buffer pointer */ |
211 | u32 next_desc_ptr; /* Next descriptor pointer */ |
212 | }; |
213 | |
214 | struct tx_desc { |
215 | u32 cmd_sts; /* Command/status field */ |
216 | u16 l4i_chk; /* CPU provided TCP checksum */ |
217 | u16 byte_cnt; /* buffer byte count */ |
218 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ |
219 | u32 next_desc_ptr; /* Pointer to next descriptor */ |
220 | }; |
221 | #else |
222 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined |
223 | #endif |
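/* Both variants describe the same hardware descriptor: the fields are
 * ordered so that the in-memory byte layout seen by the SDMA engine is
 * the same regardless of host endianness.
 */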
224 | |
225 | /* RX & TX descriptor command */ |
226 | #define BUFFER_OWNED_BY_DMA 0x80000000 |
227 | |
228 | /* RX & TX descriptor status */ |
229 | #define ERROR_SUMMARY 0x00000001 |
230 | |
231 | /* RX descriptor status */ |
232 | #define LAYER_4_CHECKSUM_OK 0x40000000 |
233 | #define RX_ENABLE_INTERRUPT 0x20000000 |
234 | #define RX_FIRST_DESC 0x08000000 |
235 | #define RX_LAST_DESC 0x04000000 |
236 | #define RX_IP_HDR_OK 0x02000000 |
237 | #define RX_PKT_IS_IPV4 0x01000000 |
238 | #define RX_PKT_IS_ETHERNETV2 0x00800000 |
239 | #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 |
240 | #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 |
241 | #define RX_PKT_IS_VLAN_TAGGED 0x00080000 |
242 | |
243 | /* TX descriptor command */ |
244 | #define TX_ENABLE_INTERRUPT 0x00800000 |
245 | #define GEN_CRC 0x00400000 |
246 | #define TX_FIRST_DESC 0x00200000 |
247 | #define TX_LAST_DESC 0x00100000 |
248 | #define ZERO_PADDING 0x00080000 |
249 | #define GEN_IP_V4_CHECKSUM 0x00040000 |
250 | #define GEN_TCP_UDP_CHECKSUM 0x00020000 |
251 | #define UDP_FRAME 0x00010000 |
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
253 | #define GEN_TCP_UDP_CHK_FULL 0x00000400 |
#define MAC_HDR_EXTRA_8_BYTES 0x00000200
255 | |
256 | #define TX_IHL_SHIFT 11 |
257 | |
258 | |
259 | /* global *******************************************************************/ |
260 | struct mv643xx_eth_shared_private { |
261 | /* |
262 | * Ethernet controller base address. |
263 | */ |
264 | void __iomem *base; |
265 | |
266 | /* |
267 | * Per-port MBUS window access register value. |
268 | */ |
269 | u32 win_protect; |
270 | |
271 | /* |
272 | * Hardware-specific parameters. |
273 | */ |
274 | int extended_rx_coal_limit; |
275 | int tx_bw_control; |
276 | int tx_csum_limit; |
277 | struct clk *clk; |
278 | }; |
279 | |
280 | #define TX_BW_CONTROL_ABSENT 0 |
281 | #define TX_BW_CONTROL_OLD_LAYOUT 1 |
282 | #define TX_BW_CONTROL_NEW_LAYOUT 2 |
283 | |
284 | static int mv643xx_eth_open(struct net_device *dev); |
285 | static int mv643xx_eth_stop(struct net_device *dev); |
286 | |
287 | |
288 | /* per-port *****************************************************************/ |
289 | struct mib_counters { |
290 | u64 good_octets_received; |
291 | u32 bad_octets_received; |
292 | u32 internal_mac_transmit_err; |
293 | u32 good_frames_received; |
294 | u32 bad_frames_received; |
295 | u32 broadcast_frames_received; |
296 | u32 multicast_frames_received; |
297 | u32 frames_64_octets; |
298 | u32 frames_65_to_127_octets; |
299 | u32 frames_128_to_255_octets; |
300 | u32 frames_256_to_511_octets; |
301 | u32 frames_512_to_1023_octets; |
302 | u32 frames_1024_to_max_octets; |
303 | u64 good_octets_sent; |
304 | u32 good_frames_sent; |
305 | u32 excessive_collision; |
306 | u32 multicast_frames_sent; |
307 | u32 broadcast_frames_sent; |
308 | u32 unrec_mac_control_received; |
309 | u32 fc_sent; |
310 | u32 good_fc_received; |
311 | u32 bad_fc_received; |
312 | u32 undersize_received; |
313 | u32 fragments_received; |
314 | u32 oversize_received; |
315 | u32 jabber_received; |
316 | u32 mac_receive_error; |
317 | u32 bad_crc_event; |
318 | u32 collision; |
319 | u32 late_collision; |
320 | /* Non MIB hardware counters */ |
321 | u32 rx_discard; |
322 | u32 rx_overrun; |
323 | }; |
324 | |
325 | struct rx_queue { |
326 | int index; |
327 | |
328 | int rx_ring_size; |
329 | |
330 | int rx_desc_count; |
331 | int rx_curr_desc; |
332 | int rx_used_desc; |
333 | |
334 | struct rx_desc *rx_desc_area; |
335 | dma_addr_t rx_desc_dma; |
336 | int rx_desc_area_size; |
337 | struct sk_buff **rx_skb; |
338 | }; |
339 | |
340 | struct tx_queue { |
341 | int index; |
342 | |
343 | int tx_ring_size; |
344 | |
345 | int tx_desc_count; |
346 | int tx_curr_desc; |
347 | int tx_used_desc; |
348 | |
349 | int tx_stop_threshold; |
350 | int tx_wake_threshold; |
351 | |
352 | char *tso_hdrs; |
353 | dma_addr_t tso_hdrs_dma; |
354 | |
355 | struct tx_desc *tx_desc_area; |
356 | char *tx_desc_mapping; /* array to track the type of the dma mapping */ |
357 | dma_addr_t tx_desc_dma; |
358 | int tx_desc_area_size; |
359 | |
360 | struct sk_buff_head tx_skb; |
361 | |
362 | unsigned long tx_packets; |
363 | unsigned long tx_bytes; |
364 | unsigned long tx_dropped; |
365 | }; |
366 | |
367 | struct mv643xx_eth_private { |
368 | struct mv643xx_eth_shared_private *shared; |
369 | void __iomem *base; |
370 | int port_num; |
371 | |
372 | struct net_device *dev; |
373 | |
374 | struct timer_list mib_counters_timer; |
375 | spinlock_t mib_counters_lock; |
376 | struct mib_counters mib_counters; |
377 | |
378 | struct work_struct tx_timeout_task; |
379 | |
380 | struct napi_struct napi; |
381 | u32 int_mask; |
382 | u8 oom; |
383 | u8 work_link; |
384 | u8 work_tx; |
385 | u8 work_tx_end; |
386 | u8 work_rx; |
387 | u8 work_rx_refill; |
388 | |
389 | int skb_size; |
390 | |
391 | /* |
392 | * RX state. |
393 | */ |
394 | int rx_ring_size; |
395 | unsigned long rx_desc_sram_addr; |
396 | int rx_desc_sram_size; |
397 | int rxq_count; |
398 | struct timer_list rx_oom; |
399 | struct rx_queue rxq[8]; |
400 | |
401 | /* |
402 | * TX state. |
403 | */ |
404 | int tx_ring_size; |
405 | unsigned long tx_desc_sram_addr; |
406 | int tx_desc_sram_size; |
407 | int txq_count; |
408 | struct tx_queue txq[8]; |
409 | |
410 | /* |
411 | * Hardware-specific parameters. |
412 | */ |
413 | struct clk *clk; |
414 | unsigned int t_clk; |
415 | }; |
416 | |
417 | |
418 | /* port register accessors **************************************************/ |
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}
438 | |
439 | |
440 | /* rxq/txq helper functions *************************************************/ |
441 | static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) |
442 | { |
443 | return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); |
444 | } |
445 | |
446 | static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) |
447 | { |
448 | return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); |
449 | } |
450 | |
451 | static void rxq_enable(struct rx_queue *rxq) |
452 | { |
453 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
455 | } |
456 | |
457 | static void rxq_disable(struct rx_queue *rxq) |
458 | { |
459 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
460 | u8 mask = 1 << rxq->index; |
461 | |
	wrlp(mp, RXQ_COMMAND, mask << 8);
463 | while (rdlp(mp, RXQ_COMMAND) & mask) |
464 | udelay(10); |
465 | } |
466 | |
467 | static void txq_reset_hw_ptr(struct tx_queue *txq) |
468 | { |
469 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
470 | u32 addr; |
471 | |
472 | addr = (u32)txq->tx_desc_dma; |
473 | addr += txq->tx_curr_desc * sizeof(struct tx_desc); |
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
475 | } |
476 | |
477 | static void txq_enable(struct tx_queue *txq) |
478 | { |
479 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
481 | } |
482 | |
483 | static void txq_disable(struct tx_queue *txq) |
484 | { |
485 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
486 | u8 mask = 1 << txq->index; |
487 | |
	wrlp(mp, TXQ_COMMAND, mask << 8);
489 | while (rdlp(mp, TXQ_COMMAND) & mask) |
490 | udelay(10); |
491 | } |
492 | |
493 | static void txq_maybe_wake(struct tx_queue *txq) |
494 | { |
495 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
497 | |
	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
503 | } |
504 | } |
505 | |
506 | static int rxq_process(struct rx_queue *rxq, int budget) |
507 | { |
508 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
509 | struct net_device_stats *stats = &mp->dev->stats; |
510 | int rx; |
511 | |
512 | rx = 0; |
513 | while (rx < budget && rxq->rx_desc_count) { |
514 | struct rx_desc *rx_desc; |
515 | unsigned int cmd_sts; |
516 | struct sk_buff *skb; |
517 | u16 byte_cnt; |
518 | |
519 | rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; |
520 | |
521 | cmd_sts = rx_desc->cmd_sts; |
522 | if (cmd_sts & BUFFER_OWNED_BY_DMA) |
523 | break; |
524 | rmb(); |
525 | |
526 | skb = rxq->rx_skb[rxq->rx_curr_desc]; |
527 | rxq->rx_skb[rxq->rx_curr_desc] = NULL; |
528 | |
529 | rxq->rx_curr_desc++; |
530 | if (rxq->rx_curr_desc == rxq->rx_ring_size) |
531 | rxq->rx_curr_desc = 0; |
532 | |
533 | dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, |
534 | rx_desc->buf_size, DMA_FROM_DEVICE); |
535 | rxq->rx_desc_count--; |
536 | rx++; |
537 | |
538 | mp->work_rx_refill |= 1 << rxq->index; |
539 | |
540 | byte_cnt = rx_desc->byte_cnt; |
541 | |
542 | /* |
543 | * Update statistics. |
544 | * |
545 | * Note that the descriptor byte count includes 2 dummy |
546 | * bytes automatically inserted by the hardware at the |
547 | * start of the packet (which we don't count), and a 4 |
548 | * byte CRC at the end of the packet (which we do count). |
549 | */ |
550 | stats->rx_packets++; |
551 | stats->rx_bytes += byte_cnt - 2; |
552 | |
553 | /* |
554 | * In case we received a packet without first / last bits |
555 | * on, or the error summary bit is set, the packet needs |
556 | * to be dropped. |
557 | */ |
558 | if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) |
559 | != (RX_FIRST_DESC | RX_LAST_DESC)) |
560 | goto err; |
561 | |
562 | /* |
563 | * The -4 is for the CRC in the trailer of the |
564 | * received packet |
565 | */ |
		skb_put(skb, byte_cnt - 2 - 4);
567 | |
568 | if (cmd_sts & LAYER_4_CHECKSUM_OK) |
569 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
		skb->protocol = eth_type_trans(skb, mp->dev);
571 | |
		napi_gro_receive(&mp->napi, skb);
573 | |
574 | continue; |
575 | |
576 | err: |
577 | stats->rx_dropped++; |
578 | |
579 | if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != |
580 | (RX_FIRST_DESC | RX_LAST_DESC)) { |
581 | if (net_ratelimit()) |
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
584 | } |
585 | |
586 | if (cmd_sts & ERROR_SUMMARY) |
587 | stats->rx_errors++; |
588 | |
589 | dev_kfree_skb(skb); |
590 | } |
591 | |
592 | if (rx < budget) |
593 | mp->work_rx &= ~(1 << rxq->index); |
594 | |
595 | return rx; |
596 | } |
597 | |
598 | static int rxq_refill(struct rx_queue *rxq, int budget) |
599 | { |
600 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
601 | int refilled; |
602 | |
603 | refilled = 0; |
604 | while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { |
605 | struct sk_buff *skb; |
606 | int rx; |
607 | struct rx_desc *rx_desc; |
608 | int size; |
609 | |
		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
611 | |
612 | if (skb == NULL) { |
613 | mp->oom = 1; |
614 | goto oom; |
615 | } |
616 | |
617 | if (SKB_DMA_REALIGN) |
618 | skb_reserve(skb, SKB_DMA_REALIGN); |
619 | |
620 | refilled++; |
621 | rxq->rx_desc_count++; |
622 | |
623 | rx = rxq->rx_used_desc++; |
624 | if (rxq->rx_used_desc == rxq->rx_ring_size) |
625 | rxq->rx_used_desc = 0; |
626 | |
627 | rx_desc = rxq->rx_desc_area + rx; |
628 | |
629 | size = skb_end_pointer(skb) - skb->data; |
630 | rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, |
631 | skb->data, size, |
632 | DMA_FROM_DEVICE); |
633 | rx_desc->buf_size = size; |
634 | rxq->rx_skb[rx] = skb; |
635 | wmb(); |
636 | rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; |
637 | wmb(); |
638 | |
639 | /* |
640 | * The hardware automatically prepends 2 bytes of |
641 | * dummy data to each received packet, so that the |
642 | * IP header ends up 16-byte aligned. |
643 | */ |
		skb_reserve(skb, 2);
645 | } |
646 | |
647 | if (refilled < budget) |
648 | mp->work_rx_refill &= ~(1 << rxq->index); |
649 | |
650 | oom: |
651 | return refilled; |
652 | } |
653 | |
654 | |
655 | /* tx ***********************************************************************/ |
656 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) |
657 | { |
658 | int frag; |
659 | |
660 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
661 | const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; |
662 | |
		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
664 | return 1; |
665 | } |
666 | |
667 | return 0; |
668 | } |
669 | |
670 | static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, |
671 | u16 *l4i_chk, u32 *command, int length) |
672 | { |
673 | int ret; |
674 | u32 cmd = 0; |
675 | |
676 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
677 | int hdr_len; |
678 | int tag_bytes; |
679 | |
680 | BUG_ON(skb->protocol != htons(ETH_P_IP) && |
681 | skb->protocol != htons(ETH_P_8021Q)); |
682 | |
683 | hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; |
684 | tag_bytes = hdr_len - ETH_HLEN; |
685 | |
686 | if (length - hdr_len > mp->shared->tx_csum_limit || |
687 | unlikely(tag_bytes & ~12)) { |
688 | ret = skb_checksum_help(skb); |
689 | if (!ret) |
690 | goto no_csum; |
691 | return ret; |
692 | } |
693 | |
694 | if (tag_bytes & 4) |
695 | cmd |= MAC_HDR_EXTRA_4_BYTES; |
696 | if (tag_bytes & 8) |
697 | cmd |= MAC_HDR_EXTRA_8_BYTES; |
698 | |
699 | cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL | |
700 | GEN_IP_V4_CHECKSUM | |
701 | ip_hdr(skb)->ihl << TX_IHL_SHIFT; |
702 | |
703 | /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL |
704 | * it seems we don't need to pass the initial checksum. |
705 | */ |
706 | switch (ip_hdr(skb)->protocol) { |
707 | case IPPROTO_UDP: |
708 | cmd |= UDP_FRAME; |
709 | *l4i_chk = 0; |
710 | break; |
711 | case IPPROTO_TCP: |
712 | *l4i_chk = 0; |
713 | break; |
714 | default: |
			WARN(1, "protocol not supported");
716 | } |
717 | } else { |
718 | no_csum: |
719 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ |
720 | cmd |= 5 << TX_IHL_SHIFT; |
721 | } |
722 | *command = cmd; |
723 | return 0; |
724 | } |
725 | |
726 | static inline int |
727 | txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, |
728 | struct sk_buff *skb, char *data, int length, |
729 | bool last_tcp, bool is_last) |
730 | { |
731 | int tx_index; |
732 | u32 cmd_sts; |
733 | struct tx_desc *desc; |
734 | |
735 | tx_index = txq->tx_curr_desc++; |
736 | if (txq->tx_curr_desc == txq->tx_ring_size) |
737 | txq->tx_curr_desc = 0; |
738 | desc = &txq->tx_desc_area[tx_index]; |
739 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; |
740 | |
741 | desc->l4i_chk = 0; |
742 | desc->byte_cnt = length; |
743 | |
744 | if (length <= 8 && (uintptr_t)data & 0x7) { |
745 | /* Copy unaligned small data fragment to TSO header data area */ |
746 | memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, |
747 | data, length); |
748 | desc->buf_ptr = txq->tso_hdrs_dma |
749 | + tx_index * TSO_HEADER_SIZE; |
750 | } else { |
751 | /* Alignment is okay, map buffer and hand off to hardware */ |
752 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; |
753 | desc->buf_ptr = dma_map_single(dev->dev.parent, data, |
754 | length, DMA_TO_DEVICE); |
755 | if (unlikely(dma_mapping_error(dev->dev.parent, |
756 | desc->buf_ptr))) { |
			WARN(1, "dma_map_single failed!\n");
758 | return -ENOMEM; |
759 | } |
760 | } |
761 | |
762 | cmd_sts = BUFFER_OWNED_BY_DMA; |
763 | if (last_tcp) { |
764 | /* last descriptor in the TCP packet */ |
765 | cmd_sts |= ZERO_PADDING | TX_LAST_DESC; |
766 | /* last descriptor in SKB */ |
767 | if (is_last) |
768 | cmd_sts |= TX_ENABLE_INTERRUPT; |
769 | } |
770 | desc->cmd_sts = cmd_sts; |
771 | return 0; |
772 | } |
773 | |
774 | static inline void |
775 | txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, |
776 | u32 *first_cmd_sts, bool first_desc) |
777 | { |
778 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
779 | int hdr_len = skb_tcp_all_headers(skb); |
780 | int tx_index; |
781 | struct tx_desc *desc; |
782 | int ret; |
783 | u32 cmd_csum = 0; |
784 | u16 l4i_chk = 0; |
785 | u32 cmd_sts; |
786 | |
787 | tx_index = txq->tx_curr_desc; |
788 | desc = &txq->tx_desc_area[tx_index]; |
789 | |
	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
	if (ret)
		WARN(1, "failed to prepare checksum!");
793 | |
794 | /* Should we set this? Can't use the value from skb_tx_csum() |
795 | * as it's not the correct initial L4 checksum to use. |
796 | */ |
797 | desc->l4i_chk = 0; |
798 | |
799 | desc->byte_cnt = hdr_len; |
800 | desc->buf_ptr = txq->tso_hdrs_dma + |
801 | txq->tx_curr_desc * TSO_HEADER_SIZE; |
802 | cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | |
803 | GEN_CRC; |
804 | |
805 | /* Defer updating the first command descriptor until all |
806 | * following descriptors have been written. |
807 | */ |
808 | if (first_desc) |
809 | *first_cmd_sts = cmd_sts; |
810 | else |
811 | desc->cmd_sts = cmd_sts; |
812 | |
813 | txq->tx_curr_desc++; |
814 | if (txq->tx_curr_desc == txq->tx_ring_size) |
815 | txq->tx_curr_desc = 0; |
816 | } |
817 | |
818 | static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, |
819 | struct net_device *dev) |
820 | { |
821 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
822 | int hdr_len, total_len, data_left, ret; |
823 | int desc_count = 0; |
824 | struct tso_t tso; |
825 | struct tx_desc *first_tx_desc; |
826 | u32 first_cmd_sts = 0; |
827 | |
828 | /* Count needed descriptors */ |
829 | if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { |
		netdev_dbg(dev, "not enough descriptors for TSO!\n");
831 | return -EBUSY; |
832 | } |
833 | |
834 | first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; |
835 | |
836 | /* Initialize the TSO handler, and prepare the first payload */ |
	hdr_len = tso_start(skb, &tso);
838 | |
839 | total_len = skb->len - hdr_len; |
840 | while (total_len > 0) { |
841 | bool first_desc = (desc_count == 0); |
842 | char *hdr; |
843 | |
844 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
845 | total_len -= data_left; |
846 | desc_count++; |
847 | |
848 | /* prepare packet headers: MAC + IP + TCP */ |
849 | hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; |
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
				first_desc);
853 | |
854 | while (data_left > 0) { |
855 | int size; |
856 | desc_count++; |
857 | |
858 | size = min_t(int, tso.size, data_left); |
			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
					       size == data_left,
					       total_len == 0);
862 | if (ret) |
863 | goto err_release; |
864 | data_left -= size; |
			tso_build_data(skb, &tso, size);
866 | } |
867 | } |
868 | |
	__skb_queue_tail(&txq->tx_skb, skb);
870 | skb_tx_timestamp(skb); |
871 | |
872 | /* ensure all other descriptors are written before first cmd_sts */ |
873 | wmb(); |
874 | first_tx_desc->cmd_sts = first_cmd_sts; |
875 | |
876 | /* clear TX_END status */ |
877 | mp->work_tx_end &= ~(1 << txq->index); |
878 | |
879 | /* ensure all descriptors are written before poking hardware */ |
880 | wmb(); |
881 | txq_enable(txq); |
882 | txq->tx_desc_count += desc_count; |
883 | return 0; |
884 | err_release: |
885 | /* TODO: Release all used data descriptors; header descriptors must not |
886 | * be DMA-unmapped. |
887 | */ |
888 | return ret; |
889 | } |
890 | |
891 | static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) |
892 | { |
893 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
894 | int nr_frags = skb_shinfo(skb)->nr_frags; |
895 | int frag; |
896 | |
897 | for (frag = 0; frag < nr_frags; frag++) { |
898 | skb_frag_t *this_frag; |
899 | int tx_index; |
900 | struct tx_desc *desc; |
901 | |
902 | this_frag = &skb_shinfo(skb)->frags[frag]; |
903 | tx_index = txq->tx_curr_desc++; |
904 | if (txq->tx_curr_desc == txq->tx_ring_size) |
905 | txq->tx_curr_desc = 0; |
906 | desc = &txq->tx_desc_area[tx_index]; |
907 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; |
908 | |
909 | /* |
910 | * The last fragment will generate an interrupt |
911 | * which will free the skb on TX completion. |
912 | */ |
913 | if (frag == nr_frags - 1) { |
914 | desc->cmd_sts = BUFFER_OWNED_BY_DMA | |
915 | ZERO_PADDING | TX_LAST_DESC | |
916 | TX_ENABLE_INTERRUPT; |
917 | } else { |
918 | desc->cmd_sts = BUFFER_OWNED_BY_DMA; |
919 | } |
920 | |
921 | desc->l4i_chk = 0; |
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0, desc->byte_cnt,
						 DMA_TO_DEVICE);
926 | } |
927 | } |
928 | |
929 | static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, |
930 | struct net_device *dev) |
931 | { |
932 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
933 | int nr_frags = skb_shinfo(skb)->nr_frags; |
934 | int tx_index; |
935 | struct tx_desc *desc; |
936 | u32 cmd_sts; |
937 | u16 l4i_chk; |
938 | int length, ret; |
939 | |
940 | cmd_sts = 0; |
941 | l4i_chk = 0; |
942 | |
943 | if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { |
944 | if (net_ratelimit()) |
			netdev_err(dev, "tx queue full?!\n");
946 | return -EBUSY; |
947 | } |
948 | |
	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
950 | if (ret) |
951 | return ret; |
952 | cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; |
953 | |
954 | tx_index = txq->tx_curr_desc++; |
955 | if (txq->tx_curr_desc == txq->tx_ring_size) |
956 | txq->tx_curr_desc = 0; |
957 | desc = &txq->tx_desc_area[tx_index]; |
958 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; |
959 | |
960 | if (nr_frags) { |
961 | txq_submit_frag_skb(txq, skb); |
962 | length = skb_headlen(skb); |
963 | } else { |
964 | cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; |
965 | length = skb->len; |
966 | } |
967 | |
968 | desc->l4i_chk = l4i_chk; |
969 | desc->byte_cnt = length; |
970 | desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, |
971 | length, DMA_TO_DEVICE); |
972 | |
	__skb_queue_tail(&txq->tx_skb, skb);
974 | |
975 | skb_tx_timestamp(skb); |
976 | |
977 | /* ensure all other descriptors are written before first cmd_sts */ |
978 | wmb(); |
979 | desc->cmd_sts = cmd_sts; |
980 | |
981 | /* clear TX_END status */ |
982 | mp->work_tx_end &= ~(1 << txq->index); |
983 | |
984 | /* ensure all descriptors are written before poking hardware */ |
985 | wmb(); |
986 | txq_enable(txq); |
987 | |
988 | txq->tx_desc_count += nr_frags + 1; |
989 | |
990 | return 0; |
991 | } |
992 | |
993 | static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) |
994 | { |
995 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
996 | int length, queue, ret; |
997 | struct tx_queue *txq; |
998 | struct netdev_queue *nq; |
999 | |
1000 | queue = skb_get_queue_mapping(skb); |
1001 | txq = mp->txq + queue; |
	nq = netdev_get_tx_queue(dev, queue);
1003 | |
1004 | if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { |
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
1007 | return NETDEV_TX_BUSY; |
1008 | } |
1009 | |
1010 | length = skb->len; |
1011 | |
1012 | if (skb_is_gso(skb)) |
1013 | ret = txq_submit_tso(txq, skb, dev); |
1014 | else |
1015 | ret = txq_submit_skb(txq, skb, dev); |
1016 | if (!ret) { |
1017 | txq->tx_bytes += length; |
1018 | txq->tx_packets++; |
1019 | |
1020 | if (txq->tx_desc_count >= txq->tx_stop_threshold) |
			netif_tx_stop_queue(nq);
1022 | } else { |
1023 | txq->tx_dropped++; |
1024 | dev_kfree_skb_any(skb); |
1025 | } |
1026 | |
1027 | return NETDEV_TX_OK; |
1028 | } |
1029 | |
1030 | |
1031 | /* tx napi ******************************************************************/ |
1032 | static void txq_kick(struct tx_queue *txq) |
1033 | { |
1034 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1036 | u32 hw_desc_ptr; |
1037 | u32 expected_ptr; |
1038 | |
	__netif_tx_lock(nq, smp_processor_id());
1040 | |
1041 | if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) |
1042 | goto out; |
1043 | |
1044 | hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); |
1045 | expected_ptr = (u32)txq->tx_desc_dma + |
1046 | txq->tx_curr_desc * sizeof(struct tx_desc); |
1047 | |
1048 | if (hw_desc_ptr != expected_ptr) |
1049 | txq_enable(txq); |
1050 | |
1051 | out: |
	__netif_tx_unlock(nq);
1053 | |
1054 | mp->work_tx_end &= ~(1 << txq->index); |
1055 | } |
1056 | |
1057 | static int txq_reclaim(struct tx_queue *txq, int budget, int force) |
1058 | { |
1059 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1061 | int reclaimed; |
1062 | |
	__netif_tx_lock_bh(nq);
1064 | |
1065 | reclaimed = 0; |
1066 | while (reclaimed < budget && txq->tx_desc_count > 0) { |
1067 | int tx_index; |
1068 | struct tx_desc *desc; |
1069 | u32 cmd_sts; |
1070 | char desc_dma_map; |
1071 | |
1072 | tx_index = txq->tx_used_desc; |
1073 | desc = &txq->tx_desc_area[tx_index]; |
1074 | desc_dma_map = txq->tx_desc_mapping[tx_index]; |
1075 | |
1076 | cmd_sts = desc->cmd_sts; |
1077 | |
1078 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { |
1079 | if (!force) |
1080 | break; |
1081 | desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; |
1082 | } |
1083 | |
1084 | txq->tx_used_desc = tx_index + 1; |
1085 | if (txq->tx_used_desc == txq->tx_ring_size) |
1086 | txq->tx_used_desc = 0; |
1087 | |
1088 | reclaimed++; |
1089 | txq->tx_desc_count--; |
1090 | |
1091 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { |
1092 | |
1093 | if (desc_dma_map == DESC_DMA_MAP_PAGE) |
1094 | dma_unmap_page(mp->dev->dev.parent, |
1095 | desc->buf_ptr, |
1096 | desc->byte_cnt, |
1097 | DMA_TO_DEVICE); |
1098 | else |
1099 | dma_unmap_single(mp->dev->dev.parent, |
1100 | desc->buf_ptr, |
1101 | desc->byte_cnt, |
1102 | DMA_TO_DEVICE); |
1103 | } |
1104 | |
1105 | if (cmd_sts & TX_ENABLE_INTERRUPT) { |
			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1107 | |
1108 | if (!WARN_ON(!skb)) |
1109 | dev_consume_skb_any(skb); |
1110 | } |
1111 | |
1112 | if (cmd_sts & ERROR_SUMMARY) { |
			netdev_info(mp->dev, "tx error\n");
1114 | mp->dev->stats.tx_errors++; |
1115 | } |
1116 | |
1117 | } |
1118 | |
	__netif_tx_unlock_bh(nq);
1120 | |
1121 | if (reclaimed < budget) |
1122 | mp->work_tx &= ~(1 << txq->index); |
1123 | |
1124 | return reclaimed; |
1125 | } |
1126 | |
1127 | |
1128 | /* tx rate control **********************************************************/ |
1129 | /* |
1130 | * Set total maximum TX rate (shared by all TX queues for this port) |
1131 | * to 'rate' bits per second, with a maximum burst of 'burst' bytes. |
1132 | */ |
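/* Worked example (illustrative numbers): with t_clk = 166 MHz and
 * rate = 100 Mbit/s, token_rate = (100000000 / 1000) * 64 / 166000 = 38
 * in integer arithmetic, comfortably below the 1023 cap applied below.
 */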
1133 | static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) |
1134 | { |
1135 | int token_rate; |
1136 | int mtu; |
1137 | int bucket_size; |
1138 | |
1139 | token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); |
1140 | if (token_rate > 1023) |
1141 | token_rate = 1023; |
1142 | |
1143 | mtu = (mp->dev->mtu + 255) >> 8; |
1144 | if (mtu > 63) |
1145 | mtu = 63; |
1146 | |
1147 | bucket_size = (burst + 255) >> 8; |
1148 | if (bucket_size > 65535) |
1149 | bucket_size = 65535; |
1150 | |
1151 | switch (mp->shared->tx_bw_control) { |
1152 | case TX_BW_CONTROL_OLD_LAYOUT: |
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1161 | break; |
1162 | } |
1163 | } |
1164 | |
1165 | static void txq_set_rate(struct tx_queue *txq, int rate, int burst) |
1166 | { |
1167 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
1168 | int token_rate; |
1169 | int bucket_size; |
1170 | |
1171 | token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); |
1172 | if (token_rate > 1023) |
1173 | token_rate = 1023; |
1174 | |
1175 | bucket_size = (burst + 255) >> 8; |
1176 | if (bucket_size > 65535) |
1177 | bucket_size = 65535; |
1178 | |
	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1181 | } |
1182 | |
1183 | static void txq_set_fixed_prio_mode(struct tx_queue *txq) |
1184 | { |
1185 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
1186 | int off; |
1187 | u32 val; |
1188 | |
1189 | /* |
1190 | * Turn on fixed priority mode. |
1191 | */ |
1192 | off = 0; |
1193 | switch (mp->shared->tx_bw_control) { |
1194 | case TX_BW_CONTROL_OLD_LAYOUT: |
1195 | off = TXQ_FIX_PRIO_CONF; |
1196 | break; |
1197 | case TX_BW_CONTROL_NEW_LAYOUT: |
1198 | off = TXQ_FIX_PRIO_CONF_MOVED; |
1199 | break; |
1200 | } |
1201 | |
1202 | if (off) { |
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
1206 | } |
1207 | } |
1208 | |
1209 | |
1210 | /* mii management interface *************************************************/ |
1211 | static void mv643xx_eth_adjust_link(struct net_device *dev) |
1212 | { |
1213 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1214 | u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); |
1215 | u32 autoneg_disable = FORCE_LINK_PASS | |
1216 | DISABLE_AUTO_NEG_SPEED_GMII | |
1217 | DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
1218 | DISABLE_AUTO_NEG_FOR_DUPLEX; |
1219 | |
1220 | if (dev->phydev->autoneg == AUTONEG_ENABLE) { |
1221 | /* enable auto negotiation */ |
1222 | pscr &= ~autoneg_disable; |
1223 | goto out_write; |
1224 | } |
1225 | |
1226 | pscr |= autoneg_disable; |
1227 | |
1228 | if (dev->phydev->speed == SPEED_1000) { |
1229 | /* force gigabit, half duplex not supported */ |
1230 | pscr |= SET_GMII_SPEED_TO_1000; |
1231 | pscr |= SET_FULL_DUPLEX_MODE; |
1232 | goto out_write; |
1233 | } |
1234 | |
1235 | pscr &= ~SET_GMII_SPEED_TO_1000; |
1236 | |
1237 | if (dev->phydev->speed == SPEED_100) |
1238 | pscr |= SET_MII_SPEED_TO_100; |
1239 | else |
1240 | pscr &= ~SET_MII_SPEED_TO_100; |
1241 | |
1242 | if (dev->phydev->duplex == DUPLEX_FULL) |
1243 | pscr |= SET_FULL_DUPLEX_MODE; |
1244 | else |
1245 | pscr &= ~SET_FULL_DUPLEX_MODE; |
1246 | |
1247 | out_write: |
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1249 | } |
1250 | |
1251 | /* statistics ***************************************************************/ |
1252 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) |
1253 | { |
1254 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1255 | struct net_device_stats *stats = &dev->stats; |
1256 | unsigned long tx_packets = 0; |
1257 | unsigned long tx_bytes = 0; |
1258 | unsigned long tx_dropped = 0; |
1259 | int i; |
1260 | |
1261 | for (i = 0; i < mp->txq_count; i++) { |
1262 | struct tx_queue *txq = mp->txq + i; |
1263 | |
1264 | tx_packets += txq->tx_packets; |
1265 | tx_bytes += txq->tx_bytes; |
1266 | tx_dropped += txq->tx_dropped; |
1267 | } |
1268 | |
1269 | stats->tx_packets = tx_packets; |
1270 | stats->tx_bytes = tx_bytes; |
1271 | stats->tx_dropped = tx_dropped; |
1272 | |
1273 | return stats; |
1274 | } |
1275 | |
1276 | static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) |
1277 | { |
1278 | return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); |
1279 | } |
1280 | |
1281 | static void mib_counters_clear(struct mv643xx_eth_private *mp) |
1282 | { |
1283 | int i; |
1284 | |
1285 | for (i = 0; i < 0x80; i += 4) |
		mib_read(mp, i);
1287 | |
1288 | /* Clear non MIB hw counters also */ |
1289 | rdlp(mp, RX_DISCARD_FRAME_CNT); |
1290 | rdlp(mp, RX_OVERRUN_FRAME_CNT); |
1291 | } |
1292 | |
1293 | static void mib_counters_update(struct mv643xx_eth_private *mp) |
1294 | { |
1295 | struct mib_counters *p = &mp->mib_counters; |
1296 | |
	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
1332 | } |
1333 | |
1334 | static void mib_counters_timer_wrapper(struct timer_list *t) |
1335 | { |
1336 | struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); |
1337 | mib_counters_update(mp); |
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1339 | } |
1340 | |
1341 | |
1342 | /* interrupt coalescing *****************************************************/ |
1343 | /* |
1344 | * Hardware coalescing parameters are set in units of 64 t_clk |
1345 | * cycles. I.e.: |
1346 | * |
1347 | * coal_delay_in_usec = 64000000 * register_value / t_clk_rate |
1348 | * |
1349 | * register_value = coal_delay_in_usec * t_clk_rate / 64000000 |
1350 | * |
1351 | * In the ->set*() methods, we round the computed register value |
1352 | * to the nearest integer. |
1353 | */ |
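/*
 * Worked example (illustrative): with t_clk = 166 MHz, a register
 * value of 250 corresponds to 64000000 * 250 / 166000000 ~= 96 usec.
 */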
1354 | static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) |
1355 | { |
1356 | u32 val = rdlp(mp, SDMA_CONFIG); |
1357 | u64 temp; |
1358 | |
1359 | if (mp->shared->extended_rx_coal_limit) |
1360 | temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); |
1361 | else |
1362 | temp = (val & 0x003fff00) >> 8; |
1363 | |
1364 | temp *= 64000000; |
1365 | temp += mp->t_clk / 2; |
1366 | do_div(temp, mp->t_clk); |
1367 | |
1368 | return (unsigned int)temp; |
1369 | } |
1370 | |
1371 | static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) |
1372 | { |
1373 | u64 temp; |
1374 | u32 val; |
1375 | |
1376 | temp = (u64)usec * mp->t_clk; |
1377 | temp += 31999999; |
1378 | do_div(temp, 64000000); |
1379 | |
1380 | val = rdlp(mp, SDMA_CONFIG); |
1381 | if (mp->shared->extended_rx_coal_limit) { |
1382 | if (temp > 0xffff) |
1383 | temp = 0xffff; |
1384 | val &= ~0x023fff80; |
1385 | val |= (temp & 0x8000) << 10; |
1386 | val |= (temp & 0x7fff) << 7; |
1387 | } else { |
1388 | if (temp > 0x3fff) |
1389 | temp = 0x3fff; |
1390 | val &= ~0x003fff00; |
1391 | val |= (temp & 0x3fff) << 8; |
1392 | } |
	wrlp(mp, SDMA_CONFIG, val);
1394 | } |
1395 | |
1396 | static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) |
1397 | { |
1398 | u64 temp; |
1399 | |
1400 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; |
1401 | temp *= 64000000; |
1402 | temp += mp->t_clk / 2; |
1403 | do_div(temp, mp->t_clk); |
1404 | |
1405 | return (unsigned int)temp; |
1406 | } |
1407 | |
1408 | static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) |
1409 | { |
1410 | u64 temp; |
1411 | |
1412 | temp = (u64)usec * mp->t_clk; |
1413 | temp += 31999999; |
1414 | do_div(temp, 64000000); |
1415 | |
1416 | if (temp > 0x3fff) |
1417 | temp = 0x3fff; |
1418 | |
	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1420 | } |
1421 | |
1422 | |
1423 | /* ethtool ******************************************************************/ |
1424 | struct mv643xx_eth_stats { |
1425 | char stat_string[ETH_GSTRING_LEN]; |
1426 | int sizeof_stat; |
1427 | int netdev_off; |
1428 | int mp_off; |
1429 | }; |
1430 | |
1431 | #define SSTAT(m) \ |
1432 | { #m, sizeof_field(struct net_device_stats, m), \ |
1433 | offsetof(struct net_device, stats.m), -1 } |
1434 | |
1435 | #define MIBSTAT(m) \ |
1436 | { #m, sizeof_field(struct mib_counters, m), \ |
1437 | -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } |
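/* Each entry records a stat's size and its offset within either struct
 * net_device or struct mv643xx_eth_private; -1 marks the offset that
 * does not apply.  mv643xx_eth_get_ethtool_stats() reads from whichever
 * offset is non-negative.
 */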
1438 | |
1439 | static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { |
1440 | SSTAT(rx_packets), |
1441 | SSTAT(tx_packets), |
1442 | SSTAT(rx_bytes), |
1443 | SSTAT(tx_bytes), |
1444 | SSTAT(rx_errors), |
1445 | SSTAT(tx_errors), |
1446 | SSTAT(rx_dropped), |
1447 | SSTAT(tx_dropped), |
1448 | MIBSTAT(good_octets_received), |
1449 | MIBSTAT(bad_octets_received), |
1450 | MIBSTAT(internal_mac_transmit_err), |
1451 | MIBSTAT(good_frames_received), |
1452 | MIBSTAT(bad_frames_received), |
1453 | MIBSTAT(broadcast_frames_received), |
1454 | MIBSTAT(multicast_frames_received), |
1455 | MIBSTAT(frames_64_octets), |
1456 | MIBSTAT(frames_65_to_127_octets), |
1457 | MIBSTAT(frames_128_to_255_octets), |
1458 | MIBSTAT(frames_256_to_511_octets), |
1459 | MIBSTAT(frames_512_to_1023_octets), |
1460 | MIBSTAT(frames_1024_to_max_octets), |
1461 | MIBSTAT(good_octets_sent), |
1462 | MIBSTAT(good_frames_sent), |
1463 | MIBSTAT(excessive_collision), |
1464 | MIBSTAT(multicast_frames_sent), |
1465 | MIBSTAT(broadcast_frames_sent), |
1466 | MIBSTAT(unrec_mac_control_received), |
1467 | MIBSTAT(fc_sent), |
1468 | MIBSTAT(good_fc_received), |
1469 | MIBSTAT(bad_fc_received), |
1470 | MIBSTAT(undersize_received), |
1471 | MIBSTAT(fragments_received), |
1472 | MIBSTAT(oversize_received), |
1473 | MIBSTAT(jabber_received), |
1474 | MIBSTAT(mac_receive_error), |
1475 | MIBSTAT(bad_crc_event), |
1476 | MIBSTAT(collision), |
1477 | MIBSTAT(late_collision), |
1478 | MIBSTAT(rx_discard), |
1479 | MIBSTAT(rx_overrun), |
1480 | }; |
1481 | |
1482 | static int |
1483 | mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, |
1484 | struct ethtool_link_ksettings *cmd) |
1485 | { |
1486 | struct net_device *dev = mp->dev; |
1487 | |
	phy_ethtool_ksettings_get(dev->phydev, cmd);
1489 | |
1490 | /* |
1491 | * The MAC does not support 1000baseT_Half. |
1492 | */ |
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.advertising);
1497 | |
1498 | return 0; |
1499 | } |
1500 | |
1501 | static int |
1502 | mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, |
1503 | struct ethtool_link_ksettings *cmd) |
1504 | { |
1505 | u32 port_status; |
1506 | u32 supported, advertising; |
1507 | |
1508 | port_status = rdlp(mp, PORT_STATUS); |
1509 | |
1510 | supported = SUPPORTED_MII; |
1511 | advertising = ADVERTISED_MII; |
1512 | switch (port_status & PORT_SPEED_MASK) { |
1513 | case PORT_SPEED_10: |
1514 | cmd->base.speed = SPEED_10; |
1515 | break; |
1516 | case PORT_SPEED_100: |
1517 | cmd->base.speed = SPEED_100; |
1518 | break; |
1519 | case PORT_SPEED_1000: |
1520 | cmd->base.speed = SPEED_1000; |
1521 | break; |
1522 | default: |
1523 | cmd->base.speed = -1; |
1524 | break; |
1525 | } |
1526 | cmd->base.duplex = (port_status & FULL_DUPLEX) ? |
1527 | DUPLEX_FULL : DUPLEX_HALF; |
1528 | cmd->base.port = PORT_MII; |
1529 | cmd->base.phy_address = 0; |
1530 | cmd->base.autoneg = AUTONEG_DISABLE; |
1531 | |
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
1536 | |
1537 | return 0; |
1538 | } |
1539 | |
1540 | static void |
1541 | mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
1542 | { |
1543 | wol->supported = 0; |
1544 | wol->wolopts = 0; |
1545 | if (dev->phydev) |
		phy_ethtool_get_wol(dev->phydev, wol);
1547 | } |
1548 | |
1549 | static int |
1550 | mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
1551 | { |
1552 | int err; |
1553 | |
1554 | if (!dev->phydev) |
1555 | return -EOPNOTSUPP; |
1556 | |
	err = phy_ethtool_set_wol(dev->phydev, wol);
	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
	 * this debugging hint is useful to have.
	 */
	if (err == -EOPNOTSUPP)
		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1563 | return err; |
1564 | } |
1565 | |
1566 | static int |
1567 | mv643xx_eth_get_link_ksettings(struct net_device *dev, |
1568 | struct ethtool_link_ksettings *cmd) |
1569 | { |
1570 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1571 | |
1572 | if (dev->phydev) |
1573 | return mv643xx_eth_get_link_ksettings_phy(mp, cmd); |
1574 | else |
1575 | return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); |
1576 | } |
1577 | |
1578 | static int |
1579 | mv643xx_eth_set_link_ksettings(struct net_device *dev, |
1580 | const struct ethtool_link_ksettings *cmd) |
1581 | { |
1582 | struct ethtool_link_ksettings c = *cmd; |
1583 | u32 advertising; |
1584 | int ret; |
1585 | |
1586 | if (!dev->phydev) |
1587 | return -EINVAL; |
1588 | |
1589 | /* |
1590 | * The MAC does not support 1000baseT_Half. |
1591 | */ |
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						c.link_modes.advertising);
	advertising &= ~ADVERTISED_1000baseT_Half;
	ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
						advertising);
1597 | |
	ret = phy_ethtool_ksettings_set(dev->phydev, &c);
1599 | if (!ret) |
1600 | mv643xx_eth_adjust_link(dev); |
1601 | return ret; |
1602 | } |
1603 | |
1604 | static void mv643xx_eth_get_drvinfo(struct net_device *dev, |
1605 | struct ethtool_drvinfo *drvinfo) |
1606 | { |
	strscpy(drvinfo->driver, mv643xx_eth_driver_name,
		sizeof(drvinfo->driver));
	strscpy(drvinfo->version, mv643xx_eth_driver_version,
		sizeof(drvinfo->version));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
1613 | } |
1614 | |
1615 | static int mv643xx_eth_get_coalesce(struct net_device *dev, |
1616 | struct ethtool_coalesce *ec, |
1617 | struct kernel_ethtool_coalesce *kernel_coal, |
1618 | struct netlink_ext_ack *extack) |
1619 | { |
1620 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1621 | |
1622 | ec->rx_coalesce_usecs = get_rx_coal(mp); |
1623 | ec->tx_coalesce_usecs = get_tx_coal(mp); |
1624 | |
1625 | return 0; |
1626 | } |
1627 | |
1628 | static int mv643xx_eth_set_coalesce(struct net_device *dev, |
1629 | struct ethtool_coalesce *ec, |
1630 | struct kernel_ethtool_coalesce *kernel_coal, |
1631 | struct netlink_ext_ack *extack) |
1632 | { |
1633 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1634 | |
	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);
1637 | |
1638 | return 0; |
1639 | } |
1640 | |
1641 | static void |
1642 | mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er, |
1643 | struct kernel_ethtool_ringparam *kernel_er, |
1644 | struct netlink_ext_ack *extack) |
1645 | { |
1646 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1647 | |
1648 | er->rx_max_pending = 4096; |
1649 | er->tx_max_pending = 4096; |
1650 | |
1651 | er->rx_pending = mp->rx_ring_size; |
1652 | er->tx_pending = mp->tx_ring_size; |
1653 | } |
1654 | |
1655 | static int |
1656 | mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er, |
1657 | struct kernel_ethtool_ringparam *kernel_er, |
1658 | struct netlink_ext_ack *extack) |
1659 | { |
1660 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1661 | |
1662 | if (er->rx_mini_pending || er->rx_jumbo_pending) |
1663 | return -EINVAL; |
1664 | |
1665 | mp->rx_ring_size = min(er->rx_pending, 4096U); |
1666 | mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, |
1667 | MV643XX_MAX_SKB_DESCS * 2, 4096); |
1668 | if (mp->tx_ring_size != er->tx_pending) |
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, er->tx_pending);
1671 | |
1672 | if (netif_running(dev)) { |
1673 | mv643xx_eth_stop(dev); |
1674 | if (mv643xx_eth_open(dev)) { |
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
1677 | return -ENOMEM; |
1678 | } |
1679 | } |
1680 | |
1681 | return 0; |
1682 | } |
1683 | |
1684 | |
1685 | static int |
1686 | mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) |
1687 | { |
1688 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1689 | bool rx_csum = features & NETIF_F_RXCSUM; |
1690 | |
	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1692 | |
1693 | return 0; |
1694 | } |

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       mv643xx_eth_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= mv643xx_eth_get_wol,
	.set_wol		= mv643xx_eth_set_wol,
	.get_link_ksettings	= mv643xx_eth_get_link_ksettings,
	.set_link_ksettings	= mv643xx_eth_set_link_ksettings,
};


/* address handling *********************************************************/
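/*
 * The station address is kept in two registers: bytes 0-3 live in
 * MAC_ADDR_HIGH (byte 0 in bits 31:24) and bytes 4-5 in the low
 * 16 bits of MAC_ADDR_LOW.
 */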
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

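/*
 * The hardware can filter unicast addresses that differ from the
 * station address only in the low nibble of the last byte, using a
 * 16-entry table indexed by that nibble.  Compute the set of nibbles
 * to accept; a return value of 0 means the address list cannot be
 * represented and the caller must fall back to unicast promiscuous
 * mode.
 */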
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}

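/*
 * Bitwise CRC-8 over the six address bytes, polynomial 0x107
 * (x^8 + x^2 + x + 1).  The result indexes the 256-entry "other"
 * multicast table used for addresses outside the special
 * 01:00:5e:00:00:xx range.
 */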
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
		goto promiscuous;

	/* Allocate both mc_spec and mc_other tables */
	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
	if (!mc_spec)
		goto promiscuous;
	mc_other = &mc_spec[64];

	netdev_for_each_mc_addr(ha, dev) {
		u8 *a = ha->addr;
		u32 *table;
		u8 entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

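		/* Each filter table is 64 words holding four one-byte
		 * entries; setting bit 0 of an entry's byte accepts
		 * frames that map (or hash) to that entry.
		 */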
		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 64; i++) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
		    mc_spec[i]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
		    mc_other[i]);
	}

	kfree(mc_spec);
	return;

promiscuous:
	for (i = 0; i < 64; i++) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
		    0x01010101u);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
		    0x01010101u);
	}
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, sa->sa_data);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL)
		goto out_free;

	rx_desc = rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
				nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_consume_skb_any(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int ret;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * reaches the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
				       GFP_KERNEL);
	if (!txq->tx_desc_mapping) {
		ret = -ENOMEM;
		goto err_free_desc_area;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
					   txq->tx_ring_size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_dma, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		ret = -ENOMEM;
		goto err_free_desc_mapping;
	}
	skb_queue_head_init(&txq->tx_skb);

	return 0;

err_free_desc_mapping:
	kfree(txq->tx_desc_mapping);
err_free_desc_area:
	if (index == 0 && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
	return ret;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
	kfree(txq->tx_desc_mapping);

	if (txq->tso_hdrs)
		dma_free_coherent(mp->dev->dev.parent,
				  txq->tx_ring_size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_dma);
}


/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
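		/* Fold the per-queue cause bits into the work bitmaps
		 * that the NAPI poll routine consumes: TX-end bits
		 * start at bit 19, RX bits at bit 2, one bit per
		 * queue.  TX-end work is only flagged for queues whose
		 * hardware queue has actually stopped (TXQ_COMMAND bit
		 * clear).
		 */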
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

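/*
 * The interrupt handler does no real work itself: it collects events,
 * masks the port's interrupts and schedules NAPI, which re-enables
 * them once it runs out of work.
 */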
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			netdev_info(dev, "link down\n");

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

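/*
 * NAPI poll: handle pending link events first, then repeatedly pick
 * the highest-numbered queue with outstanding work (fls on the work
 * bitmaps) and service it in bursts of at most 16 descriptors until
 * the budget is exhausted or no events remain.
 */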
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			work_done++;
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete_done(napi, work_done);
		wrlp(mp, INT_MASK, mp->int_mask);
	}

	return work_done;
}

static inline void oom_timer_wrapper(struct timer_list *t)
{
	struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);

	napi_schedule(&mp->napi);
}

static void port_start(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (dev->phydev) {
		struct ethtool_link_ksettings cmd;

		mv643xx_eth_get_link_ksettings(dev, &cmd);
		phy_init_hw(dev->phydev);
		mv643xx_eth_set_link_ksettings(
			dev, (const struct ethtool_link_ksettings *)&cmd);
		phy_start(dev->phydev);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (!dev->phydev)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	mp->skb_size += SKB_DMA_REALIGN;
}

static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		netdev_err(dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	mp->int_mask = INT_EXT;

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
		mp->int_mask |= INT_RX_0 << i;
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
		mp->int_mask |= INT_TX_END_0 << i;
	}

	add_timer(&mp->mib_counters_timer);
	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, mp->int_mask);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	napi_disable(&mp->napi);
	free_irq(dev->irq, dev);

	return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);
	if (dev->phydev)
		phy_stop(dev->phydev);
	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mv643xx_eth_adjust_link(dev);
	return ret;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif


/* platform glue ************************************************************/
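/*
 * Program the six address-decoding windows to match the SoC's DRAM
 * chip-select layout.  A window is enabled by clearing its bit in
 * WINDOW_BAR_ENABLE; the accumulated win_protect value grants full
 * read/write access and is written per port at probe time.
 */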
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id mv643xx_eth_shared_ids[] = {
	{ .compatible = "marvell,orion-eth", },
	{ .compatible = "marvell,kirkwood-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif

#ifdef CONFIG_OF_IRQ
#define mv643xx_eth_property(_np, _name, _v)				\
	do {								\
		u32 tmp;						\
		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
			_v = tmp;					\
	} while (0)

static struct platform_device *port_platdev[3];

static void mv643xx_eth_shared_of_remove(void)
{
	int n;

	for (n = 0; n < 3; n++) {
		platform_device_del(port_platdev[n]);
		port_platdev[n] = NULL;
	}
}

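/*
 * Create one child platform device per DT port node, translating the
 * node's properties into mv643xx_eth_platform_data so that the
 * non-DT probe path below can be reused unchanged.
 */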
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
					  struct device_node *pnp)
{
	struct platform_device *ppdev;
	struct mv643xx_eth_platform_data ppd;
	struct resource res;
	int ret;
	int dev_num = 0;

	memset(&ppd, 0, sizeof(ppd));
	ppd.shared = pdev;

	memset(&res, 0, sizeof(res));
	if (of_irq_to_resource(pnp, 0, &res) <= 0) {
		dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
		return -EINVAL;
	}

	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
		dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
		return -EINVAL;
	}

	if (ppd.port_number >= 3) {
		dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
		return -EINVAL;
	}

	while (dev_num < 3 && port_platdev[dev_num])
		dev_num++;

	if (dev_num == 3) {
		dev_err(&pdev->dev, "too many ports registered\n");
		return -EINVAL;
	}

	ret = of_get_mac_address(pnp, ppd.mac_addr);
	if (ret == -EPROBE_DEFER)
		return ret;

	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);

	of_get_phy_mode(pnp, &ppd.interface);

	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
	if (!ppd.phy_node) {
		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
		of_property_read_u32(pnp, "speed", &ppd.speed);
		of_property_read_u32(pnp, "duplex", &ppd.duplex);
	}

	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
	if (!ppdev)
		return -ENOMEM;
	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	ppdev->dev.of_node = pnp;

	ret = platform_device_add_resources(ppdev, &res, 1);
	if (ret)
		goto port_err;

	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
	if (ret)
		goto port_err;

	ret = platform_device_add(ppdev);
	if (ret)
		goto port_err;

	port_platdev[dev_num] = ppdev;

	return 0;

port_err:
	platform_device_put(ppdev);
	return ret;
}

static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_platform_data *pd;
	struct device_node *pnp, *np = pdev->dev.of_node;
	int ret;

	/* bail out if not registered from DT */
	if (!np)
		return 0;

	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;
	pdev->dev.platform_data = pd;

	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);

	for_each_available_child_of_node(np, pnp) {
		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
		if (ret) {
			of_node_put(pnp);
			mv643xx_eth_shared_of_remove();
			return ret;
		}
	}
	return 0;
}

#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	return 0;
}

static inline void mv643xx_eth_shared_of_remove(void)
{
}
#endif

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd;
	struct mv643xx_eth_shared_private *msp;
	const struct mbus_dram_target_info *dram;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		return -ENOMEM;
	platform_set_drvdata(pdev, msp);

	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (msp->base == NULL)
		return -ENOMEM;

	msp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv643xx_eth_conf_mbus_windows(msp, dram);

	ret = mv643xx_eth_shared_of_probe(pdev);
	if (ret)
		goto err_put_clk;
	pd = dev_get_platdata(&pdev->dev);

	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	return 0;

err_put_clk:
	if (!IS_ERR(msp->clk))
		clk_disable_unprepare(msp->clk);
	return ret;
}

static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	mv643xx_eth_shared_of_remove();
	if (!IS_ERR(msp->clk))
		clk_disable_unprepare(msp->clk);
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove_new	= mv643xx_eth_shared_remove,
	.driver = {
		.name		= MV643XX_ETH_SHARED_NAME,
		.of_match_table	= of_match_ptr(mv643xx_eth_shared_ids),
	},
};

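/*
 * The shared PHY_ADDR register packs one 5-bit PHY address per port,
 * at bit offset 5 * port_num.
 */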
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;
	unsigned int tx_ring_size;

	if (is_valid_ether_addr(pd->mac_addr)) {
		eth_hw_addr_set(dev, pd->mac_addr);
	} else {
		u8 addr[ETH_ALEN];

		uc_addr_get(mp, addr);
		eth_hw_addr_set(dev, addr);
	}

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		tx_ring_size = pd->tx_queue_size;

	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
				   MV643XX_MAX_SKB_DESCS * 2, 4096);
	if (mp->tx_ring_size != tx_ring_size)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, tx_ring_size);

	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

static int get_phy_mode(struct mv643xx_eth_private *mp)
{
	struct device *dev = mp->dev->dev.parent;
	phy_interface_t iface;
	int err;

	if (dev->of_node)
		err = of_get_phy_mode(dev->of_node, &iface);

	/* Historical default if unspecified.  We could also read/write
	 * the interface state in the PSC1.
	 */
	if (!dev->of_node || err)
		iface = PHY_INTERFACE_MODE_GMII;
	return iface;
}

static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct phy_device *phydev;
	int start;
	int num;
	int i;
	char phy_id[MII_BUS_ID_SIZE + 3];

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	/* Attempt to connect to the PHY using orion-mdio */
	phydev = ERR_PTR(-ENODEV);
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 "orion-mdio-mii", addr);

		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
				     get_phy_mode(mp));
		if (!IS_ERR(phydev)) {
			phy_addr_set(mp, addr);
			break;
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct net_device *dev = mp->dev;
	struct phy_device *phy = dev->phydev;

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		linkmode_copy(phy->advertising, phy->supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 phy->advertising);
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		linkmode_zero(phy->advertising);
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct net_device *dev = mp->dev;
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (!dev->phydev) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct phy_device *phydev = NULL;
	u32 psc1r;
	int err, irq;

	pd = dev_get_platdata(&pdev->dev);
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	if (of_device_is_compatible(pdev->dev.of_node,
				    "marvell,kirkwood-eth-port")) {
		psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);

		/* Kirkwood resets some registers on gated clocks.  In
		 * particular, CLK125_BYPASS_EN must be cleared, but the
		 * bit is not available on all other SoCs/system
		 * controllers using this driver.
		 */
		psc1r &= ~CLK125_BYPASS_EN;

		/* On Kirkwood with two Ethernet controllers, if both of them
		 * have RGMII_EN disabled, the first controller will be in GMII
		 * mode and the second one is effectively disabled, instead of
		 * two MII interfaces.
		 *
		 * To enable GMII in the first controller, the second one must
		 * also be configured (and may be enabled) with RGMII_EN
		 * disabled too, even though it cannot be used at all.
		 */
		switch (pd->interface) {
		/* Use internal to denote second controller being disabled */
		case PHY_INTERFACE_MODE_INTERNAL:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			psc1r &= ~RGMII_EN;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
			psc1r |= RGMII_EN;
			break;
		default:
			/* Unknown; don't touch */
			break;
		}

		wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
	}

	/*
	 * Start with a default rate, and if there is a clock, allow
	 * it to override the default.
	 */
	mp->t_clk = 133000000;
	mp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(mp->clk)) {
		clk_prepare_enable(mp->clk);
		mp->t_clk = clk_get_rate(mp->clk);
	} else if (!IS_ERR(mp->shared->clk)) {
		mp->t_clk = clk_get_rate(mp->shared->clk);
	}

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	err = 0;
	if (pd->phy_node) {
		phydev = of_phy_connect(mp->dev, pd->phy_node,
					mv643xx_eth_adjust_link, 0,
					get_phy_mode(mp));
		if (!phydev)
			err = -ENODEV;
		else
			phy_addr_set(mp, phydev->mdio.addr);
	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
		phydev = phy_scan(mp, pd->phy_addr);

		if (IS_ERR(phydev))
			err = PTR_ERR(phydev);
		else
			phy_init(mp, pd->speed, pd->duplex);
	}
	if (err == -ENODEV) {
		err = -EPROBE_DEFER;
		goto out;
	}
	if (err)
		goto out;

	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;

	init_pscr(mp, pd->speed, pd->duplex);


	mib_counters_clear(mp);

	timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);

	timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);


	irq = platform_get_irq(pdev, 0);
	if (WARN_ON(irq < 0)) {
		err = irq;
		goto out;
	}
	dev->irq = irq;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->vlan_features = dev->features;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features = dev->features;

	dev->priv_flags |= IFF_UNICAST_FLT;
	netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);

	/* MTU range: 64 - 9500 */
	dev->min_mtu = 64;
	dev->max_mtu = 9500;

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);
	free_netdev(dev);

	return err;
}

static void mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
	struct net_device *dev = mp->dev;

	unregister_netdev(mp->dev);
	if (dev->phydev)
		phy_disconnect(dev->phydev);
	cancel_work_sync(&mp->tx_timeout_task);

	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);

	free_netdev(mp->dev);
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove_new	= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
	},
};

static struct platform_driver * const drivers[] = {
	&mv643xx_eth_shared_driver,
	&mv643xx_eth_driver,
};

static int __init mv643xx_eth_init_module(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);