1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Intel IXP4xx Ethernet driver for Linux |
4 | * |
5 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> |
6 | * |
7 | * Ethernet port config (0x00 is not present on IXP42X): |
8 | * |
9 | * logical port 0x00 0x10 0x20 |
10 | * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C) |
11 | * physical PortId 2 0 1 |
12 | * TX queue 23 24 25 |
13 | * RX-free queue 26 27 28 |
14 | * TX-done queue is always 31, per-port RX and TX-ready queues are configurable |
15 | * |
16 | * Queue entries: |
17 | * bits 0 -> 1 - NPE ID (RX and TX-done) |
18 | * bits 0 -> 2 - priority (TX, per 802.1D) |
19 | * bits 3 -> 4 - port ID (user-set?) |
20 | * bits 5 -> 31 - physical descriptor address |
21 | */ |
22 | |
23 | #include <linux/delay.h> |
24 | #include <linux/dma-mapping.h> |
25 | #include <linux/dmapool.h> |
26 | #include <linux/etherdevice.h> |
27 | #include <linux/if_vlan.h> |
28 | #include <linux/io.h> |
29 | #include <linux/kernel.h> |
30 | #include <linux/net_tstamp.h> |
31 | #include <linux/of.h> |
32 | #include <linux/of_mdio.h> |
33 | #include <linux/of_net.h> |
34 | #include <linux/phy.h> |
35 | #include <linux/platform_device.h> |
36 | #include <linux/ptp_classify.h> |
37 | #include <linux/slab.h> |
38 | #include <linux/module.h> |
39 | #include <linux/soc/ixp4xx/npe.h> |
40 | #include <linux/soc/ixp4xx/qmgr.h> |
41 | #include <linux/soc/ixp4xx/cpu.h> |
42 | #include <linux/types.h> |
43 | |
44 | #define IXP4XX_ETH_NPEA 0x00 |
45 | #define IXP4XX_ETH_NPEB 0x10 |
46 | #define IXP4XX_ETH_NPEC 0x20 |
47 | |
48 | #include "ixp46x_ts.h" |
49 | |
50 | #define DEBUG_DESC 0 |
51 | #define DEBUG_RX 0 |
52 | #define DEBUG_TX 0 |
53 | #define DEBUG_PKT_BYTES 0 |
54 | #define DEBUG_MDIO 0 |
55 | #define DEBUG_CLOSE 0 |
56 | |
57 | #define DRV_NAME "ixp4xx_eth" |
58 | |
59 | #define MAX_NPES 3 |
60 | |
61 | #define RX_DESCS 64 /* also length of all RX queues */ |
62 | #define TX_DESCS 16 /* also length of all TX queues */ |
63 | #define TXDONE_QUEUE_LEN 64 /* dwords */ |
64 | |
65 | #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) |
66 | #define REGS_SIZE 0x1000 |
67 | |
68 | /* MRU is said to be 14320 in a code dump, the SW manual says that |
69 | * MRU/MTU is 16320 and includes VLAN and ethernet headers. |
70 | * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161. |
71 | * |
72 | * FIXME: we have chosen the safe default (14320) but if you can test |
73 | * jumboframes, experiment with 16320 and see what happens! |
74 | */ |
75 | #define MAX_MRU (14320 - VLAN_ETH_HLEN) |
76 | #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) |
77 | |
78 | #define NAPI_WEIGHT 16 |
79 | #define MDIO_INTERVAL (3 * HZ) |
80 | #define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ |
81 | #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ |
82 | |
83 | #define NPE_ID(port_id) ((port_id) >> 4) |
84 | #define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3) |
85 | #define TX_QUEUE(port_id) (NPE_ID(port_id) + 23) |
86 | #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) |
87 | #define TXDONE_QUEUE 31 |
88 | |
89 | #define PTP_SLAVE_MODE 1 |
90 | #define PTP_MASTER_MODE 2 |
91 | #define PORT2CHANNEL(p) NPE_ID(p->id) |
92 | |
93 | /* TX Control Registers */ |
94 | #define TX_CNTRL0_TX_EN 0x01 |
95 | #define TX_CNTRL0_HALFDUPLEX 0x02 |
96 | #define TX_CNTRL0_RETRY 0x04 |
97 | #define TX_CNTRL0_PAD_EN 0x08 |
98 | #define TX_CNTRL0_APPEND_FCS 0x10 |
99 | #define TX_CNTRL0_2DEFER 0x20 |
100 | #define TX_CNTRL0_RMII 0x40 /* reduced MII */ |
101 | #define TX_CNTRL1_RETRIES 0x0F /* 4 bits */ |
102 | |
103 | /* RX Control Registers */ |
104 | #define RX_CNTRL0_RX_EN 0x01 |
105 | #define RX_CNTRL0_PADSTRIP_EN 0x02 |
106 | #define RX_CNTRL0_SEND_FCS 0x04 |
107 | #define RX_CNTRL0_PAUSE_EN 0x08 |
108 | #define RX_CNTRL0_LOOP_EN 0x10 |
109 | #define RX_CNTRL0_ADDR_FLTR_EN 0x20 |
110 | #define RX_CNTRL0_RX_RUNT_EN 0x40 |
111 | #define RX_CNTRL0_BCAST_DIS 0x80 |
112 | #define RX_CNTRL1_DEFER_EN 0x01 |
113 | |
114 | /* Core Control Register */ |
115 | #define CORE_RESET 0x01 |
116 | #define CORE_RX_FIFO_FLUSH 0x02 |
117 | #define CORE_TX_FIFO_FLUSH 0x04 |
118 | #define CORE_SEND_JAM 0x08 |
119 | #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */ |
120 | |
121 | #define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \ |
122 | TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \ |
123 | TX_CNTRL0_2DEFER) |
124 | #define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN |
125 | #define DEFAULT_CORE_CNTRL CORE_MDC_EN |
126 | |
127 | |
/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
/* Name restored: this line was a bare "#define 0x0C" (missing identifier) */
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17
152 | |
153 | |
154 | #ifdef __ARMEB__ |
155 | typedef struct sk_buff buffer_t; |
156 | #define free_buffer dev_kfree_skb |
157 | #define free_buffer_irq dev_consume_skb_irq |
158 | #else |
159 | typedef void buffer_t; |
160 | #define free_buffer kfree |
161 | #define free_buffer_irq kfree |
162 | #endif |
163 | |
/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
	u8 phy;		/* MII PHY ID, 0 - 31 */
	u8 rxq;		/* configurable, currently 0 - 31 only */
	u8 txreadyq;	/* queue holding this port's free TX descriptors */
	u8 hwaddr[ETH_ALEN];	/* station MAC address */
	u8 npe;		/* NPE instance used by this interface */
	bool has_mdio;	/* If this instance has an MDIO bus */
};
173 | |
/* Per-port MAC register block; the trailing comments give each field's
 * hex byte offset from the port register base. */
struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};
192 | |
/* Run-time state for one Ethernet port */
struct port {
	struct eth_regs __iomem *regs;	/* port MAC registers */
	struct ixp46x_ts_regs __iomem *timesync_regs; /* IXP46x PTP unit */
	int phc_index;		/* PTP clock index; < 0 when not yet found */
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;	/* platform data (PHY, queues, MAC) */
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	dma_addr_t desc_tab_phys;	/* bus address of desc_tab */
	int id;			/* logical port ID */
	int speed, duplex;	/* cached link state; speed == 0 means down */
	u8 firmware[4];		/* NPE firmware revision (ethtool drvinfo) */
	int hwts_tx_en;		/* HW TX timestamping enabled */
	int hwts_rx_en;		/* 0, PTP_SLAVE_MODE or PTP_MASTER_MODE */
};
210 | |
/* NPE message structure: two 32-bit words.  The byte fields are declared
 * in reversed order on little-endian builds — NOTE(review): presumably so
 * the in-memory layout matches what the NPE expects; confirm against the
 * NPE message documentation. */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};
221 | |
/* Ethernet packet descriptor, shared with the NPE.  As with struct msg,
 * the sub-word fields are declared in byte-swapped order on little-endian
 * builds so both variants describe the same in-memory layout. */
struct desc {
	u32 next; /* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len; /* buffer length */
	u16 pkt_len; /* packet length */
	u32 data; /* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len; /* packet length */
	u16 buf_len; /* buffer length */
	u32 data; /* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
258 | |
259 | |
260 | #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
261 | (n) * sizeof(struct desc)) |
262 | #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) |
263 | |
264 | #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
265 | ((n) + RX_DESCS) * sizeof(struct desc)) |
266 | #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) |
267 | |
268 | #ifndef __ARMEB__ |
269 | static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) |
270 | { |
271 | int i; |
272 | for (i = 0; i < cnt; i++) |
273 | dest[i] = swab32(src[i]); |
274 | } |
275 | #endif |
276 | |
277 | static DEFINE_SPINLOCK(mdio_lock); |
278 | static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ |
279 | static struct mii_bus *mdio_bus; |
280 | static struct device_node *mdio_bus_np; |
281 | static int ports_open; |
282 | static struct port *npe_port_tab[MAX_NPES]; |
283 | static struct dma_pool *dma_pool; |
284 | |
285 | static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) |
286 | { |
287 | u8 *data = skb->data; |
288 | unsigned int offset; |
289 | u16 *hi, *id; |
290 | u32 lo; |
291 | |
292 | if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4) |
293 | return 0; |
294 | |
295 | offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; |
296 | |
297 | if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) |
298 | return 0; |
299 | |
300 | hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); |
301 | id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); |
302 | |
303 | memcpy(&lo, &hi[1], sizeof(lo)); |
304 | |
305 | return (uid_hi == ntohs(*hi) && |
306 | uid_lo == ntohl(lo) && |
307 | seqid == ntohs(*id)); |
308 | } |
309 | |
/* If RX hardware timestamping is enabled and the received skb matches the
 * PTP frame latched by the IXP46x timesync channel, attach the RX
 * timestamp to the skb.  The snapshot is re-armed in either case. */
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	val = __raw_readl(addr: &regs->channel[ch].ch_event);

	/* No RX snapshot latched for this channel */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(addr: &regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(addr: &regs->channel[ch].src_uuid_hi);

	/* src_uuid_hi packs the top 16 UUID bits and the sequence ID */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	/* Snapshot belongs to some other frame - just re-arm it below */
	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	/* Assemble the 64-bit tick count and convert to nanoseconds */
	lo = __raw_readl(addr: &regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(addr: &regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* Writing the bit back re-arms the RX snapshot for the next frame */
	__raw_writel(RX_SNAPSHOT_LOCKED, addr: &regs->channel[ch].ch_event);
}
351 | |
/* Poll the IXP46x timesync unit for the TX snapshot of an outgoing frame
 * and report it via skb_tstamp_tx().  No-op unless the skb requested a HW
 * timestamp and TX timestamping is enabled on this port. */
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(addr: &regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	/* Timed out: withdraw the in-progress flag so no report is expected */
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	/* Assemble the 64-bit tick count and convert to nanoseconds */
	lo = __raw_readl(addr: &regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(addr: &regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps);

	/* Re-arm the TX snapshot for the next timestamped frame */
	__raw_writel(TX_SNAPSHOT_LOCKED, addr: &regs->channel[ch].ch_event);
}
397 | |
/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, program the
 * timesync channel for PTPv1 slave/master RX filtering, and record the
 * chosen TX/RX timestamping modes on the port. */
static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(dev: netdev);
	int ret;
	int ch;

	if (copy_from_user(to: &cfg, from: ifr->ifr_data, n: sizeof(cfg)))
		return -EFAULT;

	/* Locate the IXP46x PTP hardware; fails on variants without it */
	ret = ixp46x_ptp_find(regs: &port->timesync_regs, phc_index: &port->phc_index);
	if (ret)
		return ret;

	ch = PORT2CHANNEL(port);
	regs = port->timesync_regs;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* Slave mode: timestamp SYNC messages */
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(val: 0, addr: &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* Master mode: timestamp DELAY_REQ messages */
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, addr: &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     addr: &regs->channel[ch].ch_event);

	return copy_to_user(to: ifr->ifr_data, from: &cfg, n: sizeof(cfg)) ? -EFAULT : 0;
}
443 | |
444 | static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) |
445 | { |
446 | struct hwtstamp_config cfg; |
447 | struct port *port = netdev_priv(dev: netdev); |
448 | |
449 | cfg.flags = 0; |
450 | cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; |
451 | |
452 | switch (port->hwts_rx_en) { |
453 | case 0: |
454 | cfg.rx_filter = HWTSTAMP_FILTER_NONE; |
455 | break; |
456 | case PTP_SLAVE_MODE: |
457 | cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; |
458 | break; |
459 | case PTP_MASTER_MODE: |
460 | cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; |
461 | break; |
462 | default: |
463 | WARN_ON_ONCE(1); |
464 | return -ERANGE; |
465 | } |
466 | |
467 | return copy_to_user(to: ifr->ifr_data, from: &cfg, n: sizeof(cfg)) ? -EFAULT : 0; |
468 | } |
469 | |
/* Issue one MDIO read or write cycle on the shared management interface.
 * Caller must hold mdio_lock.  Returns -1 if the interface is busy or the
 * command times out, 0 for a completed write, the register value for a
 * completed read, or 0xFFFF when the PHY flagged a read error. */
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	/* Bit 7 of command[3] is the GO/busy flag */
	if (__raw_readl(addr: &mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n" , bus->name);
		return -1;
	}

	if (write) {
		/* Write data is split into low/high bytes */
		__raw_writel(val: cmd & 0xFF, addr: &mdio_regs->mdio_command[0]);
		__raw_writel(val: cmd >> 8, addr: &mdio_regs->mdio_command[1]);
	}
	/* PHY address and register number span command[2] and command[3] */
	__raw_writel(val: ((phy_id << 5) | location) & 0xFF,
		     addr: &mdio_regs->mdio_command[2]);
	__raw_writel(val: (phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     addr: &mdio_regs->mdio_command[3]);

	/* Busy-wait (with udelay) for the GO bit to clear */
	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(addr: &mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n" , bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n" , bus->name,
	       phy_id, write ? "write" : "read" , cycles);
#endif

	if (write)
		return 0;

	/* Bit 7 of status[3] signals a failed read */
	if (__raw_readl(addr: &mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n" , bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	/* Read data comes back as two bytes in status[0] and status[1] */
	return (__raw_readl(addr: &mdio_regs->mdio_status[0]) & 0xFF) |
	       ((__raw_readl(addr: &mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
520 | |
/* mii_bus .read callback; serializes access to the MDIO unit via
 * mdio_lock and delegates to ixp4xx_mdio_cmd(). */
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, write: 0, cmd: 0);
	spin_unlock_irqrestore(lock: &mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n" , bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}
535 | |
/* mii_bus .write callback; serializes access to the MDIO unit via
 * mdio_lock and delegates to ixp4xx_mdio_cmd(). */
static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, write: 1, cmd: val);
	spin_unlock_irqrestore(lock: &mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n" ,
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}
551 | |
552 | static int ixp4xx_mdio_register(struct eth_regs __iomem *regs) |
553 | { |
554 | int err; |
555 | |
556 | if (!(mdio_bus = mdiobus_alloc())) |
557 | return -ENOMEM; |
558 | |
559 | mdio_regs = regs; |
560 | __raw_writel(DEFAULT_CORE_CNTRL, addr: &mdio_regs->core_control); |
561 | mdio_bus->name = "IXP4xx MII Bus" ; |
562 | mdio_bus->read = &ixp4xx_mdio_read; |
563 | mdio_bus->write = &ixp4xx_mdio_write; |
564 | snprintf(buf: mdio_bus->id, MII_BUS_ID_SIZE, fmt: "ixp4xx-eth-0" ); |
565 | |
566 | err = of_mdiobus_register(mdio: mdio_bus, np: mdio_bus_np); |
567 | if (err) |
568 | mdiobus_free(bus: mdio_bus); |
569 | return err; |
570 | } |
571 | |
/* Unregister and free the MII bus created by ixp4xx_mdio_register(). */
static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(bus: mdio_bus);
	mdiobus_free(bus: mdio_bus);
}
577 | |
578 | |
579 | static void ixp4xx_adjust_link(struct net_device *dev) |
580 | { |
581 | struct port *port = netdev_priv(dev); |
582 | struct phy_device *phydev = dev->phydev; |
583 | |
584 | if (!phydev->link) { |
585 | if (port->speed) { |
586 | port->speed = 0; |
587 | printk(KERN_INFO "%s: link down\n" , dev->name); |
588 | } |
589 | return; |
590 | } |
591 | |
592 | if (port->speed == phydev->speed && port->duplex == phydev->duplex) |
593 | return; |
594 | |
595 | port->speed = phydev->speed; |
596 | port->duplex = phydev->duplex; |
597 | |
598 | if (port->duplex) |
599 | __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX, |
600 | addr: &port->regs->tx_control[0]); |
601 | else |
602 | __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX, |
603 | addr: &port->regs->tx_control[0]); |
604 | |
605 | netdev_info(dev, format: "%s: link up, speed %u Mb/s, %s duplex\n" , |
606 | dev->name, port->speed, port->duplex ? "full" : "half" ); |
607 | } |
608 | |
609 | |
/* Dump the first DEBUG_PKT_BYTES bytes of a frame to the kernel log,
 * with separators after the MAC addresses.  Compiles away to nothing
 * unless DEBUG_PKT_BYTES is non-zero. */
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_debug(dev, "%s(%i) " , func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X" ,
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "" ,
		       data[i]);
	}
	printk("\n" );
#endif
}
627 | |
628 | |
/* Dump one packet descriptor (all fields and both MAC addresses) to the
 * kernel log.  No-op unless DEBUG_DESC is enabled. */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n" ,
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
643 | |
/* Pop one entry from a HW queue and translate the physical descriptor
 * address it carries into this port's descriptor index.
 * Returns the index, or -1 if the queue is empty. */
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, desc: &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}
662 | |
/* Push a descriptor's physical address onto a HW queue.  The address must
 * be 32-byte aligned because the low 5 bits of a queue entry carry
 * metadata (see the queue-entry layout in the file header). */
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, val: phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
672 | |
673 | |
/* Unmap a completed TX buffer.  On little-endian the mapping covered a
 * word-aligned bounce buffer, so the 2-bit alignment offset folded into
 * desc->data is stripped and the padded length unmapped. */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
685 | |
686 | |
/* RX queue not-empty interrupt: mask further RX IRQs and defer the work
 * to the NAPI poll loop. */
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n" , dev->name);
#endif
	qmgr_disable_irq(queue: port->plat->rxq);
	napi_schedule(n: &port->napi);
}
698 | |
/* NAPI poll: drain up to @budget frames from the port's RX queue, pass
 * them to the network stack and return the descriptors (with fresh
 * buffers on big-endian) to the RX-free queue. */
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_debug(dev, "eth_poll\n" );
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(queue: rxq, port, is_tx: 0)) < 0) {
#if DEBUG_RX
			netdev_debug(dev, "eth_poll napi_complete\n" );
#endif
			napi_complete(n: napi);
			qmgr_enable_irq(queue: rxq);
			/* Close the race with frames arriving between the
			   last dequeue and re-enabling the IRQ: if the queue
			   filled again, reschedule and keep polling */
			if (!qmgr_stat_below_low_watermark(queue: rxq) &&
			    napi_schedule(n: napi)) { /* not empty again */
#if DEBUG_RX
				netdev_debug(dev, "eth_poll napi_schedule succeeded\n" );
#endif
				qmgr_disable_irq(queue: rxq);
				continue;
			}
#if DEBUG_RX
			netdev_debug(dev, "eth_poll all done\n" );
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		/* Big-endian: allocate a replacement buffer and hand the
		   filled one to the stack (zero-copy receive) */
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		/* Little-endian: the frame is byte-swapped into a new skb
		   below, sized just large enough for the swabbed copy */
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(queue: rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(dev: &dev->dev, addr: desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, dir: DMA_FROM_DEVICE);
		memcpy_swab32(dest: (u32 *)skb->data, src: (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, len: desc->pkt_len);

		debug_pkt(dev, func: "eth_poll" , data: skb->data, len: skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(queue: rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_debug(dev, "eth_poll(): end, not all work done\n" );
#endif
	return received; /* not all work done */
}
803 | |
804 | |
/* TX-done queue callback (shared by all ports): reclaim completed TX
 * descriptors, free their buffers, return the descriptors to the owning
 * port's TX-ready queue and wake its netdev queue if it was stopped. */
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n" );
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		/* Low bits of a TX-done entry carry the NPE ID (see the
		   queue-entry layout in the file header) */
		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n" ,
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(objp: port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		/* Sample emptiness before returning the descriptor so a
		   previously-stopped queue can be woken exactly once */
		start = qmgr_stat_below_low_watermark(queue: port->plat->txreadyq);
		queue_put_desc(queue: port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n" ,
			       port->netdev->name);
#endif
			netif_wake_queue(dev: port->netdev);
		}
	}
}
852 | |
/* ndo_start_xmit: queue one frame to the NPE.  On little-endian the
 * payload is first copied into a byte-swapped, word-aligned bounce
 * buffer (the NPE expects big-endian data). */
static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit\n" );
#endif

	/* Frame larger than the configured MRU: drop and count as error */
	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, func: "eth_xmit" , data: skb->data, len: skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(size: bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(dest: mem, src: (u32 *)((uintptr_t)skb->data & ~3), cnt: bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev: &dev->dev, dma_addr: phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(objp: mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* The stack only calls us while TX-ready descriptors are available,
	   so this dequeue must succeed */
	n = queue_get_desc(queue: txreadyq, port, is_tx: 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(queue: txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_debug(dev, "eth_xmit queue full\n" );
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(queue: txreadyq)) {
#if DEBUG_TX
			netdev_debug(dev, "eth_xmit ready again\n" );
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit end\n" );
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	/* Little-endian: the frame lives on in the bounce buffer */
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
943 | |
944 | |
/* ndo_set_rx_mode: program the single address+mask RX filter.  All
 * requested multicast addresses are folded into one address plus a mask
 * clearing the bits in which they differ. */
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* ALLMULTI: match any address with the multicast bit set */
	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(val: allmulti[i], addr: &port->regs->mcast_addr[i]);
			__raw_writel(val: allmulti[i], addr: &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     addr: &port->regs->rx_control[0]);
		return;
	}

	/* Promiscuous mode or no multicast addresses: disable the filter */
	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     addr: &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(addr: diffs);

	/* diffs accumulates the bits in which the addresses differ; those
	   bits are masked out of the hardware comparison below */
	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(val: addr[i], addr: &port->regs->mcast_addr[i]);
		__raw_writel(val: ~diffs[i], addr: &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     addr: &port->regs->rx_control[0]);
}
987 | |
988 | |
989 | static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) |
990 | { |
991 | if (!netif_running(dev)) |
992 | return -EINVAL; |
993 | |
994 | if (cpu_is_ixp46x()) { |
995 | if (cmd == SIOCSHWTSTAMP) |
996 | return hwtstamp_set(netdev: dev, ifr: req); |
997 | if (cmd == SIOCGHWTSTAMP) |
998 | return hwtstamp_get(netdev: dev, ifr: req); |
999 | } |
1000 | |
1001 | return phy_mii_ioctl(phydev: dev->phydev, ifr: req, cmd); |
1002 | } |
1003 | |
1004 | /* ethtool support */ |
1005 | |
1006 | static void ixp4xx_get_drvinfo(struct net_device *dev, |
1007 | struct ethtool_drvinfo *info) |
1008 | { |
1009 | struct port *port = netdev_priv(dev); |
1010 | |
1011 | strscpy(p: info->driver, DRV_NAME, size: sizeof(info->driver)); |
1012 | snprintf(buf: info->fw_version, size: sizeof(info->fw_version), fmt: "%u:%u:%u:%u" , |
1013 | port->firmware[0], port->firmware[1], |
1014 | port->firmware[2], port->firmware[3]); |
1015 | strscpy(p: info->bus_info, q: "internal" , size: sizeof(info->bus_info)); |
1016 | } |
1017 | |
1018 | static int ixp4xx_get_ts_info(struct net_device *dev, |
1019 | struct ethtool_ts_info *info) |
1020 | { |
1021 | struct port *port = netdev_priv(dev); |
1022 | |
1023 | if (port->phc_index < 0) |
1024 | ixp46x_ptp_find(regs: &port->timesync_regs, phc_index: &port->phc_index); |
1025 | |
1026 | info->phc_index = port->phc_index; |
1027 | |
1028 | if (info->phc_index < 0) { |
1029 | info->so_timestamping = |
1030 | SOF_TIMESTAMPING_TX_SOFTWARE | |
1031 | SOF_TIMESTAMPING_RX_SOFTWARE | |
1032 | SOF_TIMESTAMPING_SOFTWARE; |
1033 | return 0; |
1034 | } |
1035 | info->so_timestamping = |
1036 | SOF_TIMESTAMPING_TX_HARDWARE | |
1037 | SOF_TIMESTAMPING_RX_HARDWARE | |
1038 | SOF_TIMESTAMPING_RAW_HARDWARE; |
1039 | info->tx_types = |
1040 | (1 << HWTSTAMP_TX_OFF) | |
1041 | (1 << HWTSTAMP_TX_ON); |
1042 | info->rx_filters = |
1043 | (1 << HWTSTAMP_FILTER_NONE) | |
1044 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | |
1045 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); |
1046 | return 0; |
1047 | } |
1048 | |
/* ethtool operations; link handling is delegated to the PHY layer */
static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1057 | |
1058 | |
1059 | static int request_queues(struct port *port) |
1060 | { |
1061 | int err; |
1062 | |
1063 | err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, |
1064 | "%s:RX-free" , port->netdev->name); |
1065 | if (err) |
1066 | return err; |
1067 | |
1068 | err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, |
1069 | "%s:RX" , port->netdev->name); |
1070 | if (err) |
1071 | goto rel_rxfree; |
1072 | |
1073 | err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, |
1074 | "%s:TX" , port->netdev->name); |
1075 | if (err) |
1076 | goto rel_rx; |
1077 | |
1078 | err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, |
1079 | "%s:TX-ready" , port->netdev->name); |
1080 | if (err) |
1081 | goto rel_tx; |
1082 | |
1083 | /* TX-done queue handles skbs sent out by the NPEs */ |
1084 | if (!ports_open) { |
1085 | err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, |
1086 | "%s:TX-done" , DRV_NAME); |
1087 | if (err) |
1088 | goto rel_txready; |
1089 | } |
1090 | return 0; |
1091 | |
1092 | rel_txready: |
1093 | qmgr_release_queue(queue: port->plat->txreadyq); |
1094 | rel_tx: |
1095 | qmgr_release_queue(TX_QUEUE(port->id)); |
1096 | rel_rx: |
1097 | qmgr_release_queue(queue: port->plat->rxq); |
1098 | rel_rxfree: |
1099 | qmgr_release_queue(RXFREE_QUEUE(port->id)); |
1100 | printk(KERN_DEBUG "%s: unable to request hardware queues\n" , |
1101 | port->netdev->name); |
1102 | return err; |
1103 | } |
1104 | |
1105 | static void release_queues(struct port *port) |
1106 | { |
1107 | qmgr_release_queue(RXFREE_QUEUE(port->id)); |
1108 | qmgr_release_queue(queue: port->plat->rxq); |
1109 | qmgr_release_queue(TX_QUEUE(port->id)); |
1110 | qmgr_release_queue(queue: port->plat->txreadyq); |
1111 | |
1112 | if (!ports_open) |
1113 | qmgr_release_queue(TXDONE_QUEUE); |
1114 | } |
1115 | |
1116 | static int init_queues(struct port *port) |
1117 | { |
1118 | int i; |
1119 | |
1120 | if (!ports_open) { |
1121 | dma_pool = dma_pool_create(DRV_NAME, dev: &port->netdev->dev, |
1122 | POOL_ALLOC_SIZE, align: 32, allocation: 0); |
1123 | if (!dma_pool) |
1124 | return -ENOMEM; |
1125 | } |
1126 | |
1127 | port->desc_tab = dma_pool_zalloc(pool: dma_pool, GFP_KERNEL, handle: &port->desc_tab_phys); |
1128 | if (!port->desc_tab) |
1129 | return -ENOMEM; |
1130 | memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ |
1131 | memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); |
1132 | |
1133 | /* Setup RX buffers */ |
1134 | for (i = 0; i < RX_DESCS; i++) { |
1135 | struct desc *desc = rx_desc_ptr(port, i); |
1136 | buffer_t *buff; /* skb or kmalloc()ated memory */ |
1137 | void *data; |
1138 | #ifdef __ARMEB__ |
1139 | if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE))) |
1140 | return -ENOMEM; |
1141 | data = buff->data; |
1142 | #else |
1143 | if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL))) |
1144 | return -ENOMEM; |
1145 | data = buff; |
1146 | #endif |
1147 | desc->buf_len = MAX_MRU; |
1148 | desc->data = dma_map_single(&port->netdev->dev, data, |
1149 | RX_BUFF_SIZE, DMA_FROM_DEVICE); |
1150 | if (dma_mapping_error(dev: &port->netdev->dev, dma_addr: desc->data)) { |
1151 | free_buffer(objp: buff); |
1152 | return -EIO; |
1153 | } |
1154 | desc->data += NET_IP_ALIGN; |
1155 | port->rx_buff_tab[i] = buff; |
1156 | } |
1157 | |
1158 | return 0; |
1159 | } |
1160 | |
1161 | static void destroy_queues(struct port *port) |
1162 | { |
1163 | int i; |
1164 | |
1165 | if (port->desc_tab) { |
1166 | for (i = 0; i < RX_DESCS; i++) { |
1167 | struct desc *desc = rx_desc_ptr(port, i); |
1168 | buffer_t *buff = port->rx_buff_tab[i]; |
1169 | if (buff) { |
1170 | dma_unmap_single(&port->netdev->dev, |
1171 | desc->data - NET_IP_ALIGN, |
1172 | RX_BUFF_SIZE, DMA_FROM_DEVICE); |
1173 | free_buffer(objp: buff); |
1174 | } |
1175 | } |
1176 | for (i = 0; i < TX_DESCS; i++) { |
1177 | struct desc *desc = tx_desc_ptr(port, i); |
1178 | buffer_t *buff = port->tx_buff_tab[i]; |
1179 | if (buff) { |
1180 | dma_unmap_tx(port, desc); |
1181 | free_buffer(objp: buff); |
1182 | } |
1183 | } |
1184 | dma_pool_free(pool: dma_pool, vaddr: port->desc_tab, addr: port->desc_tab_phys); |
1185 | port->desc_tab = NULL; |
1186 | } |
1187 | |
1188 | if (!ports_open && dma_pool) { |
1189 | dma_pool_destroy(pool: dma_pool); |
1190 | dma_pool = NULL; |
1191 | } |
1192 | } |
1193 | |
1194 | static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu) |
1195 | { |
1196 | struct port *port = netdev_priv(dev); |
1197 | struct npe *npe = port->npe; |
1198 | int framesize, chunks; |
1199 | struct msg msg = {}; |
1200 | |
1201 | /* adjust for ethernet headers */ |
1202 | framesize = new_mtu + VLAN_ETH_HLEN; |
1203 | /* max rx/tx 64 byte chunks */ |
1204 | chunks = DIV_ROUND_UP(framesize, 64); |
1205 | |
1206 | msg.cmd = NPE_SETMAXFRAMELENGTHS; |
1207 | msg.eth_id = port->id; |
1208 | |
1209 | /* Firmware wants to know buffer size in 64 byte chunks */ |
1210 | msg.byte2 = chunks << 8; |
1211 | msg.byte3 = chunks << 8; |
1212 | |
1213 | msg.byte4 = msg.byte6 = framesize >> 8; |
1214 | msg.byte5 = msg.byte7 = framesize & 0xff; |
1215 | |
1216 | if (npe_send_recv_message(npe, msg: &msg, what: "ETH_SET_MAX_FRAME_LENGTH" )) |
1217 | return -EIO; |
1218 | netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n" , |
1219 | npe_name(npe), new_mtu); |
1220 | |
1221 | return 0; |
1222 | } |
1223 | |
1224 | static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu) |
1225 | { |
1226 | int ret; |
1227 | |
1228 | /* MTU can only be changed when the interface is up. We also |
1229 | * set the MTU from dev->mtu when opening the device. |
1230 | */ |
1231 | if (dev->flags & IFF_UP) { |
1232 | ret = ixp4xx_do_change_mtu(dev, new_mtu); |
1233 | if (ret < 0) |
1234 | return ret; |
1235 | } |
1236 | |
1237 | dev->mtu = new_mtu; |
1238 | |
1239 | return 0; |
1240 | } |
1241 | |
1242 | static int eth_open(struct net_device *dev) |
1243 | { |
1244 | struct port *port = netdev_priv(dev); |
1245 | struct npe *npe = port->npe; |
1246 | struct msg msg; |
1247 | int i, err; |
1248 | |
1249 | if (!npe_running(npe)) { |
1250 | err = npe_load_firmware(npe, name: npe_name(npe), dev: &dev->dev); |
1251 | if (err) |
1252 | return err; |
1253 | |
1254 | if (npe_recv_message(npe, msg: &msg, what: "ETH_GET_STATUS" )) { |
1255 | netdev_err(dev, format: "%s not responding\n" , npe_name(npe)); |
1256 | return -EIO; |
1257 | } |
1258 | port->firmware[0] = msg.byte4; |
1259 | port->firmware[1] = msg.byte5; |
1260 | port->firmware[2] = msg.byte6; |
1261 | port->firmware[3] = msg.byte7; |
1262 | } |
1263 | |
1264 | memset(&msg, 0, sizeof(msg)); |
1265 | msg.cmd = NPE_VLAN_SETRXQOSENTRY; |
1266 | msg.eth_id = port->id; |
1267 | msg.byte5 = port->plat->rxq | 0x80; |
1268 | msg.byte7 = port->plat->rxq << 4; |
1269 | for (i = 0; i < 8; i++) { |
1270 | msg.byte3 = i; |
1271 | if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_RXQ" )) |
1272 | return -EIO; |
1273 | } |
1274 | |
1275 | msg.cmd = NPE_EDB_SETPORTADDRESS; |
1276 | msg.eth_id = PHYSICAL_ID(port->id); |
1277 | msg.byte2 = dev->dev_addr[0]; |
1278 | msg.byte3 = dev->dev_addr[1]; |
1279 | msg.byte4 = dev->dev_addr[2]; |
1280 | msg.byte5 = dev->dev_addr[3]; |
1281 | msg.byte6 = dev->dev_addr[4]; |
1282 | msg.byte7 = dev->dev_addr[5]; |
1283 | if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_MAC" )) |
1284 | return -EIO; |
1285 | |
1286 | memset(&msg, 0, sizeof(msg)); |
1287 | msg.cmd = NPE_FW_SETFIREWALLMODE; |
1288 | msg.eth_id = port->id; |
1289 | if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_FIREWALL_MODE" )) |
1290 | return -EIO; |
1291 | |
1292 | ixp4xx_do_change_mtu(dev, new_mtu: dev->mtu); |
1293 | |
1294 | if ((err = request_queues(port)) != 0) |
1295 | return err; |
1296 | |
1297 | if ((err = init_queues(port)) != 0) { |
1298 | destroy_queues(port); |
1299 | release_queues(port); |
1300 | return err; |
1301 | } |
1302 | |
1303 | port->speed = 0; /* force "link up" message */ |
1304 | phy_start(phydev: dev->phydev); |
1305 | |
1306 | for (i = 0; i < ETH_ALEN; i++) |
1307 | __raw_writel(val: dev->dev_addr[i], addr: &port->regs->hw_addr[i]); |
1308 | __raw_writel(val: 0x08, addr: &port->regs->random_seed); |
1309 | __raw_writel(val: 0x12, addr: &port->regs->partial_empty_threshold); |
1310 | __raw_writel(val: 0x30, addr: &port->regs->partial_full_threshold); |
1311 | __raw_writel(val: 0x08, addr: &port->regs->tx_start_bytes); |
1312 | __raw_writel(val: 0x15, addr: &port->regs->tx_deferral); |
1313 | __raw_writel(val: 0x08, addr: &port->regs->tx_2part_deferral[0]); |
1314 | __raw_writel(val: 0x07, addr: &port->regs->tx_2part_deferral[1]); |
1315 | __raw_writel(val: 0x80, addr: &port->regs->slot_time); |
1316 | __raw_writel(val: 0x01, addr: &port->regs->int_clock_threshold); |
1317 | |
1318 | /* Populate queues with buffers, no failure after this point */ |
1319 | for (i = 0; i < TX_DESCS; i++) |
1320 | queue_put_desc(queue: port->plat->txreadyq, |
1321 | tx_desc_phys(port, i), tx_desc_ptr(port, i)); |
1322 | |
1323 | for (i = 0; i < RX_DESCS; i++) |
1324 | queue_put_desc(RXFREE_QUEUE(port->id), |
1325 | rx_desc_phys(port, i), rx_desc_ptr(port, i)); |
1326 | |
1327 | __raw_writel(TX_CNTRL1_RETRIES, addr: &port->regs->tx_control[1]); |
1328 | __raw_writel(DEFAULT_TX_CNTRL0, addr: &port->regs->tx_control[0]); |
1329 | __raw_writel(val: 0, addr: &port->regs->rx_control[1]); |
1330 | __raw_writel(DEFAULT_RX_CNTRL0, addr: &port->regs->rx_control[0]); |
1331 | |
1332 | napi_enable(n: &port->napi); |
1333 | eth_set_mcast_list(dev); |
1334 | netif_start_queue(dev); |
1335 | |
1336 | qmgr_set_irq(queue: port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, |
1337 | handler: eth_rx_irq, pdev: dev); |
1338 | if (!ports_open) { |
1339 | qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, |
1340 | handler: eth_txdone_irq, NULL); |
1341 | qmgr_enable_irq(TXDONE_QUEUE); |
1342 | } |
1343 | ports_open++; |
1344 | /* we may already have RX data, enables IRQ */ |
1345 | napi_schedule(n: &port->napi); |
1346 | return 0; |
1347 | } |
1348 | |
1349 | static int eth_close(struct net_device *dev) |
1350 | { |
1351 | struct port *port = netdev_priv(dev); |
1352 | struct msg msg; |
1353 | int buffs = RX_DESCS; /* allocated RX buffers */ |
1354 | int i; |
1355 | |
1356 | ports_open--; |
1357 | qmgr_disable_irq(queue: port->plat->rxq); |
1358 | napi_disable(n: &port->napi); |
1359 | netif_stop_queue(dev); |
1360 | |
1361 | while (queue_get_desc(RXFREE_QUEUE(port->id), port, is_tx: 0) >= 0) |
1362 | buffs--; |
1363 | |
1364 | memset(&msg, 0, sizeof(msg)); |
1365 | msg.cmd = NPE_SETLOOPBACK_MODE; |
1366 | msg.eth_id = port->id; |
1367 | msg.byte3 = 1; |
1368 | if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_ENABLE_LOOPBACK" )) |
1369 | netdev_crit(dev, format: "unable to enable loopback\n" ); |
1370 | |
1371 | i = 0; |
1372 | do { /* drain RX buffers */ |
1373 | while (queue_get_desc(queue: port->plat->rxq, port, is_tx: 0) >= 0) |
1374 | buffs--; |
1375 | if (!buffs) |
1376 | break; |
1377 | if (qmgr_stat_empty(TX_QUEUE(port->id))) { |
1378 | /* we have to inject some packet */ |
1379 | struct desc *desc; |
1380 | u32 phys; |
1381 | int n = queue_get_desc(queue: port->plat->txreadyq, port, is_tx: 1); |
1382 | BUG_ON(n < 0); |
1383 | desc = tx_desc_ptr(port, n); |
1384 | phys = tx_desc_phys(port, n); |
1385 | desc->buf_len = desc->pkt_len = 1; |
1386 | wmb(); |
1387 | queue_put_desc(TX_QUEUE(port->id), phys, desc); |
1388 | } |
1389 | udelay(1); |
1390 | } while (++i < MAX_CLOSE_WAIT); |
1391 | |
1392 | if (buffs) |
1393 | netdev_crit(dev, format: "unable to drain RX queue, %i buffer(s)" |
1394 | " left in NPE\n" , buffs); |
1395 | #if DEBUG_CLOSE |
1396 | if (!buffs) |
1397 | netdev_debug(dev, "draining RX queue took %i cycles\n" , i); |
1398 | #endif |
1399 | |
1400 | buffs = TX_DESCS; |
1401 | while (queue_get_desc(TX_QUEUE(port->id), port, is_tx: 1) >= 0) |
1402 | buffs--; /* cancel TX */ |
1403 | |
1404 | i = 0; |
1405 | do { |
1406 | while (queue_get_desc(queue: port->plat->txreadyq, port, is_tx: 1) >= 0) |
1407 | buffs--; |
1408 | if (!buffs) |
1409 | break; |
1410 | } while (++i < MAX_CLOSE_WAIT); |
1411 | |
1412 | if (buffs) |
1413 | netdev_crit(dev, format: "unable to drain TX queue, %i buffer(s) " |
1414 | "left in NPE\n" , buffs); |
1415 | #if DEBUG_CLOSE |
1416 | if (!buffs) |
1417 | netdev_debug(dev, "draining TX queues took %i cycles\n" , i); |
1418 | #endif |
1419 | |
1420 | msg.byte3 = 0; |
1421 | if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_DISABLE_LOOPBACK" )) |
1422 | netdev_crit(dev, format: "unable to disable loopback\n" ); |
1423 | |
1424 | phy_stop(phydev: dev->phydev); |
1425 | |
1426 | if (!ports_open) |
1427 | qmgr_disable_irq(TXDONE_QUEUE); |
1428 | destroy_queues(port); |
1429 | release_queues(port); |
1430 | return 0; |
1431 | } |
1432 | |
/* net_device operations for the IXP4xx NPE Ethernet ports */
static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_change_mtu = ixp4xx_eth_change_mtu,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_eth_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
1443 | |
1444 | static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) |
1445 | { |
1446 | struct device_node *np = dev->of_node; |
1447 | struct of_phandle_args queue_spec; |
1448 | struct of_phandle_args npe_spec; |
1449 | struct device_node *mdio_np; |
1450 | struct eth_plat_info *plat; |
1451 | u8 mac[ETH_ALEN]; |
1452 | int ret; |
1453 | |
1454 | plat = devm_kzalloc(dev, size: sizeof(*plat), GFP_KERNEL); |
1455 | if (!plat) |
1456 | return NULL; |
1457 | |
1458 | ret = of_parse_phandle_with_fixed_args(np, list_name: "intel,npe-handle" , cell_count: 1, index: 0, |
1459 | out_args: &npe_spec); |
1460 | if (ret) { |
1461 | dev_err(dev, "no NPE engine specified\n" ); |
1462 | return NULL; |
1463 | } |
1464 | /* NPE ID 0x00, 0x10, 0x20... */ |
1465 | plat->npe = (npe_spec.args[0] << 4); |
1466 | |
1467 | /* Check if this device has an MDIO bus */ |
1468 | mdio_np = of_get_child_by_name(node: np, name: "mdio" ); |
1469 | if (mdio_np) { |
1470 | plat->has_mdio = true; |
1471 | mdio_bus_np = mdio_np; |
1472 | /* DO NOT put the mdio_np, it will be used */ |
1473 | } |
1474 | |
1475 | /* Get the rx queue as a resource from queue manager */ |
1476 | ret = of_parse_phandle_with_fixed_args(np, list_name: "queue-rx" , cell_count: 1, index: 0, |
1477 | out_args: &queue_spec); |
1478 | if (ret) { |
1479 | dev_err(dev, "no rx queue phandle\n" ); |
1480 | return NULL; |
1481 | } |
1482 | plat->rxq = queue_spec.args[0]; |
1483 | |
1484 | /* Get the txready queue as resource from queue manager */ |
1485 | ret = of_parse_phandle_with_fixed_args(np, list_name: "queue-txready" , cell_count: 1, index: 0, |
1486 | out_args: &queue_spec); |
1487 | if (ret) { |
1488 | dev_err(dev, "no txready queue phandle\n" ); |
1489 | return NULL; |
1490 | } |
1491 | plat->txreadyq = queue_spec.args[0]; |
1492 | |
1493 | ret = of_get_mac_address(np, mac); |
1494 | if (!ret) { |
1495 | dev_info(dev, "Setting macaddr from DT %pM\n" , mac); |
1496 | memcpy(plat->hwaddr, mac, ETH_ALEN); |
1497 | } |
1498 | |
1499 | return plat; |
1500 | } |
1501 | |
1502 | static int ixp4xx_eth_probe(struct platform_device *pdev) |
1503 | { |
1504 | struct phy_device *phydev = NULL; |
1505 | struct device *dev = &pdev->dev; |
1506 | struct device_node *np = dev->of_node; |
1507 | struct eth_plat_info *plat; |
1508 | struct net_device *ndev; |
1509 | struct port *port; |
1510 | int err; |
1511 | |
1512 | plat = ixp4xx_of_get_platdata(dev); |
1513 | if (!plat) |
1514 | return -ENODEV; |
1515 | |
1516 | if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port)))) |
1517 | return -ENOMEM; |
1518 | |
1519 | SET_NETDEV_DEV(ndev, dev); |
1520 | port = netdev_priv(dev: ndev); |
1521 | port->netdev = ndev; |
1522 | port->id = plat->npe; |
1523 | port->phc_index = -1; |
1524 | |
1525 | /* Get the port resource and remap */ |
1526 | port->regs = devm_platform_get_and_ioremap_resource(pdev, index: 0, NULL); |
1527 | if (IS_ERR(ptr: port->regs)) |
1528 | return PTR_ERR(ptr: port->regs); |
1529 | |
1530 | /* Register the MDIO bus if we have it */ |
1531 | if (plat->has_mdio) { |
1532 | err = ixp4xx_mdio_register(regs: port->regs); |
1533 | if (err) { |
1534 | dev_err(dev, "failed to register MDIO bus\n" ); |
1535 | return err; |
1536 | } |
1537 | } |
1538 | /* If the instance with the MDIO bus has not yet appeared, |
1539 | * defer probing until it gets probed. |
1540 | */ |
1541 | if (!mdio_bus) |
1542 | return -EPROBE_DEFER; |
1543 | |
1544 | ndev->netdev_ops = &ixp4xx_netdev_ops; |
1545 | ndev->ethtool_ops = &ixp4xx_ethtool_ops; |
1546 | ndev->tx_queue_len = 100; |
1547 | /* Inherit the DMA masks from the platform device */ |
1548 | ndev->dev.dma_mask = dev->dma_mask; |
1549 | ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; |
1550 | |
1551 | ndev->min_mtu = ETH_MIN_MTU; |
1552 | ndev->max_mtu = MAX_MRU; |
1553 | |
1554 | netif_napi_add_weight(dev: ndev, napi: &port->napi, poll: eth_poll, NAPI_WEIGHT); |
1555 | |
1556 | if (!(port->npe = npe_request(NPE_ID(port->id)))) |
1557 | return -EIO; |
1558 | |
1559 | port->plat = plat; |
1560 | npe_port_tab[NPE_ID(port->id)] = port; |
1561 | if (is_valid_ether_addr(addr: plat->hwaddr)) |
1562 | eth_hw_addr_set(dev: ndev, addr: plat->hwaddr); |
1563 | else |
1564 | eth_hw_addr_random(dev: ndev); |
1565 | |
1566 | platform_set_drvdata(pdev, data: ndev); |
1567 | |
1568 | __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET, |
1569 | addr: &port->regs->core_control); |
1570 | udelay(50); |
1571 | __raw_writel(DEFAULT_CORE_CNTRL, addr: &port->regs->core_control); |
1572 | udelay(50); |
1573 | |
1574 | phydev = of_phy_get_and_connect(dev: ndev, np, hndlr: ixp4xx_adjust_link); |
1575 | if (!phydev) { |
1576 | err = -ENODEV; |
1577 | dev_err(dev, "no phydev\n" ); |
1578 | goto err_free_mem; |
1579 | } |
1580 | |
1581 | phydev->irq = PHY_POLL; |
1582 | |
1583 | if ((err = register_netdev(dev: ndev))) |
1584 | goto err_phy_dis; |
1585 | |
1586 | netdev_info(dev: ndev, format: "%s: MII PHY %i on %s\n" , ndev->name, plat->phy, |
1587 | npe_name(npe: port->npe)); |
1588 | |
1589 | return 0; |
1590 | |
1591 | err_phy_dis: |
1592 | phy_disconnect(phydev); |
1593 | err_free_mem: |
1594 | npe_port_tab[NPE_ID(port->id)] = NULL; |
1595 | npe_release(npe: port->npe); |
1596 | return err; |
1597 | } |
1598 | |
1599 | static void ixp4xx_eth_remove(struct platform_device *pdev) |
1600 | { |
1601 | struct net_device *ndev = platform_get_drvdata(pdev); |
1602 | struct phy_device *phydev = ndev->phydev; |
1603 | struct port *port = netdev_priv(dev: ndev); |
1604 | |
1605 | unregister_netdev(dev: ndev); |
1606 | phy_disconnect(phydev); |
1607 | ixp4xx_mdio_remove(); |
1608 | npe_port_tab[NPE_ID(port->id)] = NULL; |
1609 | npe_release(npe: port->npe); |
1610 | } |
1611 | |
/* Device tree match table */
static const struct of_device_id ixp4xx_eth_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ethernet",
	},
	{ },
};
1618 | |
/* Platform driver glue and module metadata */
static struct platform_driver ixp4xx_eth_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
	},
	.probe = ixp4xx_eth_probe,
	.remove_new = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");