// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2020 Broadcom
 */

#define pr_fmt(fmt) "bcmgenet: " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT 4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY 0

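/* The default queue (queue 16) is sized with whatever descriptors are
 * left over once the hardware priority queues have claimed theirs.
 */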
#define GENET_Q16_RX_BD_CNT \
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT \
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH 2048
#define SKB_ALIGNMENT 32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);

static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so skip these
	 * expensive writes unless the platform is explicitly configured
	 * for 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These helpers deal with the register map changes between GENET 1.1
 * and GENET 2. Only the registers currently used by the driver are
 * covered.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

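/* The common TDMA/RDMA control registers live after the per-ring
 * register blocks, hence the additional DMA_RINGS_SIZE offset below.
 */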
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors.
 * We merge the common fields and prefix the registers whose meaning
 * depends on the direction with TDMA/RDMA.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing. For obvious reasons the
 * LO and HI word parts are contiguous, which shifts the offsets of the
 * remaining registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

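	/* Filters 0-31 are enabled via the second HFB_FLT_ENABLE register
	 * (offset + 4), filters 32 and up via the first one.
	 */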
	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
	reg |= RBUF_HFB_EN;
	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}

static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset, reg, reg1;

	offset = HFB_FLT_ENABLE_V3PLUS;
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
	if (f_index < 32) {
		reg1 &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
	} else {
		reg &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg, offset);
	}
	if (!reg && !reg1) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		reg &= ~RBUF_HFB_EN;
		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
	}
}

static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

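	/* Each DMA_INDEX2RING register packs eight 4-bit queue indices,
	 * one per filter.
	 */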
	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}

static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

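	/* Filter lengths are packed four 8-bit values per register and
	 * are indexed from the end of the filter table downwards.
	 */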
	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}

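/* The HFB matches data at nibble granularity, so only masks that cover
 * whole nibbles (0x00, 0x0f, 0xf0 or 0xff per byte) can be programmed.
 */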
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define VALIDATE_MASK(x) \
	bcmgenet_hfb_validate_mask(&(x), sizeof(x))

static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
				    u32 offset, void *val, void *mask,
				    size_t size)
{
	u32 index, tmp;

	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

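	/* Each 32-bit filter word carries two data bytes plus per-nibble
	 * match-enable bits (bits 19:16), so bytes are written in pairs.
	 */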
	while (size--) {
		if (offset++ & 1) {
			tmp &= ~0x300FF;
			tmp |= (*(unsigned char *)val++);
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0x30000;
				break;
			case 0xF0:
				tmp |= 0x20000;
				break;
			case 0x0F:
				tmp |= 0x10000;
				break;
			}
			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
			if (size)
				tmp = bcmgenet_hfb_readl(priv,
							 index * sizeof(u32));
		} else {
			tmp &= ~0xCFF00;
			tmp |= (*(unsigned char *)val++) << 8;
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0xC0000;
				break;
			case 0xF0:
				tmp |= 0x80000;
				break;
			case 0x0F:
				tmp |= 0x40000;
				break;
			}
			if (!size)
				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
		}
	}

	return 0;
}

static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
					     struct bcmgenet_rxnfc_rule *rule)
{
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	u32 offset = 0, f_length = 0, f;
	u8 val_8, mask_8;
	__be16 val_16;
	u16 mask_16;
	size_t size;

	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
			bcmgenet_hfb_insert_data(priv, f, 12,
						 &fs->h_ext.vlan_etype,
						 &fs->m_ext.vlan_etype,
						 sizeof(fs->h_ext.vlan_etype));
			bcmgenet_hfb_insert_data(priv, f, 14,
						 &fs->h_ext.vlan_tci,
						 &fs->m_ext.vlan_tci,
						 sizeof(fs->h_ext.vlan_tci));
			offset += VLAN_HLEN;
			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
		}
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));
		break;
	case IP_USER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
		/* Specify IP Ether Type */
		val_16 = htons(ETH_P_IP);
		mask_16 = 0xFFFF;
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
					 &val_8, &mask_8,
					 sizeof(val_8));
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
		bcmgenet_hfb_insert_data(priv, f,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 size);
		f_length += DIV_ROUND_UP(size, 2);
		break;
	}

	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		/* Ring 0 flows can be handled by the default descriptor ring.
		 * We'll map them to ring 0, but don't enable the filter.
		 */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
	} else {
		/* Other Rx rings are direct mapped here */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
							 fs->ring_cookie);
		bcmgenet_hfb_enable_filter(priv, f);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
	}
}

/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 base, i;

	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
		bcmgenet_hfb_clear_filter(priv, i);
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->rxnfc_list);
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	}

	bcmgenet_hfb_clear(priv);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
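	/* Convert the ring 16 timeout from 8.192 us hardware ticks back
	 * to microseconds.
	 */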
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
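	/* Convert microseconds to 8.192 us timeout ticks, rounding up */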
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* The base system clock is 125 MHz and the DMA timeout tick is this
	 * reference clock divided by 1024, i.e. roughly 8.192 us. The maximum
	 * value has to fit in DMA_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}

static void bcmgenet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv;
	u32 umac_cmd;

	priv = netdev_priv(dev);

	epause->autoneg = priv->autoneg_pause;

	if (netif_carrier_ok(dev)) {
		/* report active state when link is up */
		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
	} else {
		/* otherwise report stored settings */
		epause->tx_pause = priv->tx_pause;
		epause->rx_pause = priv->rx_pause;
	}
}

static int bcmgenet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!dev->phydev)
		return -ENODEV;

	if (!phy_validate_pause(dev->phydev, epause))
		return -EINVAL;

	priv->autoneg_pause = !!epause->autoneg;
	priv->tx_pause = !!epause->tx_pause;
	priv->rx_pause = !!epause->rx_pause;

	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of the RX stats and the beginning of
 * the TX stats, and another between the end of the TX stats and the
 * beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET 0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
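		/* RUNT and TX counters sit past one or two 0xC gaps in the
		 * MIB block; the fallthroughs below accumulate the offsets.
		 */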
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
			     bool tx_lpi_enabled)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (tx_lpi_enabled)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->tx_lpi_enabled = p->tx_lpi_enabled;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;
	bool active;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false, false);
	} else {
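		/* Only report EEE as active if the PHY initialized it */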
		active = phy_init_eee(dev->phydev, false) >= 0;
		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

static int bcmgenet_validate_flow(struct net_device *dev,
				  struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
	    cmd->fs.location != RX_CLS_LOC_ANY) {
		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
			   cmd->fs.location);
		return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case IP_USER_FLOW:
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
			   cmd->fs.flow_type);
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
		}
	}

	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int bcmgenet_insert_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *loc_rule;
	int err, i;

	if (priv->hw_params->hfb_filter_size < 128) {
		netdev_err(dev, "rxnfc: Not supported by this device\n");
		return -EINVAL;
	}

	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
			   cmd->fs.ring_cookie);
		return -EINVAL;
	}

	err = bcmgenet_validate_flow(dev, cmd);
	if (err)
		return err;

	if (cmd->fs.location == RX_CLS_LOC_ANY) {
		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
			cmd->fs.location = loc_rule->fs.location;
			err = memcmp(&loc_rule->fs, &cmd->fs,
				     sizeof(struct ethtool_rx_flow_spec));
			if (!err)
				/* rule exists so return current location */
				return 0;
		}
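		/* No identical rule found, claim the first unused slot */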
1460 | for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { |
1461 | loc_rule = &priv->rxnfc_rules[i]; |
1462 | if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) { |
1463 | cmd->fs.location = i; |
1464 | break; |
1465 | } |
1466 | } |
1467 | if (i == MAX_NUM_OF_FS_RULES) { |
1468 | cmd->fs.location = RX_CLS_LOC_ANY; |
1469 | return -ENOSPC; |
1470 | } |
1471 | } else { |
1472 | loc_rule = &priv->rxnfc_rules[cmd->fs.location]; |
1473 | } |
1474 | if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) |
1475 | bcmgenet_hfb_disable_filter(priv, f_index: cmd->fs.location); |
1476 | if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { |
1477 | list_del(entry: &loc_rule->list); |
1478 | bcmgenet_hfb_clear_filter(priv, f_index: cmd->fs.location); |
1479 | } |
1480 | loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; |
1481 | memcpy(&loc_rule->fs, &cmd->fs, |
1482 | sizeof(struct ethtool_rx_flow_spec)); |
1483 | |
1484 | bcmgenet_hfb_create_rxnfc_filter(priv, rule: loc_rule); |
1485 | |
1486 | list_add_tail(new: &loc_rule->list, head: &priv->rxnfc_list); |
1487 | |
1488 | return 0; |
1489 | } |
1490 | |
1491 | static int bcmgenet_delete_flow(struct net_device *dev, |
1492 | struct ethtool_rxnfc *cmd) |
1493 | { |
1494 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1495 | struct bcmgenet_rxnfc_rule *rule; |
1496 | int err = 0; |
1497 | |
1498 | if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) |
1499 | return -EINVAL; |
1500 | |
1501 | rule = &priv->rxnfc_rules[cmd->fs.location]; |
1502 | if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { |
1503 | err = -ENOENT; |
1504 | goto out; |
1505 | } |
1506 | |
1507 | if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) |
1508 | bcmgenet_hfb_disable_filter(priv, f_index: cmd->fs.location); |
1509 | if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { |
1510 | list_del(entry: &rule->list); |
1511 | bcmgenet_hfb_clear_filter(priv, f_index: cmd->fs.location); |
1512 | } |
1513 | rule->state = BCMGENET_RXNFC_STATE_UNUSED; |
1514 | memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); |
1515 | |
1516 | out: |
1517 | return err; |
1518 | } |
1519 | |
1520 | static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) |
1521 | { |
1522 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1523 | int err = 0; |
1524 | |
1525 | switch (cmd->cmd) { |
1526 | case ETHTOOL_SRXCLSRLINS: |
1527 | err = bcmgenet_insert_flow(dev, cmd); |
1528 | break; |
1529 | case ETHTOOL_SRXCLSRLDEL: |
1530 | err = bcmgenet_delete_flow(dev, cmd); |
1531 | break; |
1532 | default: |
1533 | netdev_warn(dev: priv->dev, format: "Unsupported ethtool command. (%d)\n" , |
1534 | cmd->cmd); |
1535 | return -EINVAL; |
1536 | } |
1537 | |
1538 | return err; |
1539 | } |
1540 | |
1541 | static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, |
1542 | int loc) |
1543 | { |
1544 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1545 | struct bcmgenet_rxnfc_rule *rule; |
1546 | int err = 0; |
1547 | |
1548 | if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) |
1549 | return -EINVAL; |
1550 | |
1551 | rule = &priv->rxnfc_rules[loc]; |
1552 | if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) |
1553 | err = -ENOENT; |
1554 | else |
1555 | memcpy(&cmd->fs, &rule->fs, |
1556 | sizeof(struct ethtool_rx_flow_spec)); |
1557 | |
1558 | return err; |
1559 | } |
1560 | |
1561 | static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) |
1562 | { |
1563 | struct list_head *pos; |
1564 | int res = 0; |
1565 | |
1566 | list_for_each(pos, &priv->rxnfc_list) |
1567 | res++; |
1568 | |
1569 | return res; |
1570 | } |
1571 | |
1572 | static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
1573 | u32 *rule_locs) |
1574 | { |
1575 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1576 | struct bcmgenet_rxnfc_rule *rule; |
1577 | int err = 0; |
1578 | int i = 0; |
1579 | |
1580 | switch (cmd->cmd) { |
1581 | case ETHTOOL_GRXRINGS: |
1582 | cmd->data = priv->hw_params->rx_queues ?: 1; |
1583 | break; |
1584 | case ETHTOOL_GRXCLSRLCNT: |
1585 | cmd->rule_cnt = bcmgenet_get_num_flows(priv); |
1586 | cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL; |
1587 | break; |
1588 | case ETHTOOL_GRXCLSRULE: |
1589 | err = bcmgenet_get_flow(dev, cmd, loc: cmd->fs.location); |
1590 | break; |
1591 | case ETHTOOL_GRXCLSRLALL: |
1592 | list_for_each_entry(rule, &priv->rxnfc_list, list) |
1593 | if (i < cmd->rule_cnt) |
1594 | rule_locs[i++] = rule->fs.location; |
1595 | cmd->rule_cnt = i; |
1596 | cmd->data = MAX_NUM_OF_FS_RULES; |
1597 | break; |
1598 | default: |
1599 | err = -EOPNOTSUPP; |
1600 | break; |
1601 | } |
1602 | |
1603 | return err; |
1604 | } |
1605 | |
1606 | /* standard ethtool support functions. */ |
1607 | static const struct ethtool_ops bcmgenet_ethtool_ops = { |
1608 | .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | |
1609 | ETHTOOL_COALESCE_MAX_FRAMES | |
1610 | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, |
1611 | .begin = bcmgenet_begin, |
1612 | .complete = bcmgenet_complete, |
1613 | .get_strings = bcmgenet_get_strings, |
1614 | .get_sset_count = bcmgenet_get_sset_count, |
1615 | .get_ethtool_stats = bcmgenet_get_ethtool_stats, |
1616 | .get_drvinfo = bcmgenet_get_drvinfo, |
1617 | .get_link = ethtool_op_get_link, |
1618 | .get_msglevel = bcmgenet_get_msglevel, |
1619 | .set_msglevel = bcmgenet_set_msglevel, |
1620 | .get_wol = bcmgenet_get_wol, |
1621 | .set_wol = bcmgenet_set_wol, |
1622 | .get_eee = bcmgenet_get_eee, |
1623 | .set_eee = bcmgenet_set_eee, |
1624 | .nway_reset = phy_ethtool_nway_reset, |
1625 | .get_coalesce = bcmgenet_get_coalesce, |
1626 | .set_coalesce = bcmgenet_set_coalesce, |
1627 | .get_link_ksettings = bcmgenet_get_link_ksettings, |
1628 | .set_link_ksettings = bcmgenet_set_link_ksettings, |
1629 | .get_ts_info = ethtool_op_get_ts_info, |
1630 | .get_rxnfc = bcmgenet_get_rxnfc, |
1631 | .set_rxnfc = bcmgenet_set_rxnfc, |
1632 | .get_pauseparam = bcmgenet_get_pauseparam, |
1633 | .set_pauseparam = bcmgenet_set_pauseparam, |
1634 | }; |
1635 | |
1636 | /* Power down the unimac, based on mode. */ |
1637 | static int bcmgenet_power_down(struct bcmgenet_priv *priv, |
1638 | enum bcmgenet_power_mode mode) |
1639 | { |
1640 | int ret = 0; |
1641 | u32 reg; |
1642 | |
1643 | switch (mode) { |
1644 | case GENET_POWER_CABLE_SENSE: |
		phy_detach(priv->dev->phydev);
1646 | break; |
1647 | |
1648 | case GENET_POWER_WOL_MAGIC: |
1649 | ret = bcmgenet_wol_power_down_cfg(priv, mode); |
1650 | break; |
1651 | |
1652 | case GENET_POWER_PASSIVE: |
1653 | /* Power down LED */ |
1654 | if (priv->hw_params->flags & GENET_HAS_EXT) { |
1655 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
1656 | if (GENET_IS_V5(priv) && !priv->ephy_16nm) |
1657 | reg |= EXT_PWR_DOWN_PHY_EN | |
1658 | EXT_PWR_DOWN_PHY_RD | |
1659 | EXT_PWR_DOWN_PHY_SD | |
1660 | EXT_PWR_DOWN_PHY_RX | |
1661 | EXT_PWR_DOWN_PHY_TX | |
1662 | EXT_IDDQ_GLBL_PWR; |
1663 | else |
1664 | reg |= EXT_PWR_DOWN_PHY; |
1665 | |
1666 | reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); |
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
1670 | } |
1671 | break; |
1672 | default: |
1673 | break; |
1674 | } |
1675 | |
1676 | return ret; |
1677 | } |
1678 | |
1679 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, |
1680 | enum bcmgenet_power_mode mode) |
1681 | { |
1682 | u32 reg; |
1683 | |
1684 | if (!(priv->hw_params->flags & GENET_HAS_EXT)) |
1685 | return; |
1686 | |
1687 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
1688 | |
1689 | switch (mode) { |
1690 | case GENET_POWER_PASSIVE: |
1691 | reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | |
1692 | EXT_ENERGY_DET_MASK); |
1693 | if (GENET_IS_V5(priv) && !priv->ephy_16nm) { |
1694 | reg &= ~(EXT_PWR_DOWN_PHY_EN | |
1695 | EXT_PWR_DOWN_PHY_RD | |
1696 | EXT_PWR_DOWN_PHY_SD | |
1697 | EXT_PWR_DOWN_PHY_RX | |
1698 | EXT_PWR_DOWN_PHY_TX | |
1699 | EXT_IDDQ_GLBL_PWR); |
1700 | reg |= EXT_PHY_RESET; |
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1702 | mdelay(1); |
1703 | |
1704 | reg &= ~EXT_PHY_RESET; |
1705 | } else { |
1706 | reg &= ~EXT_PWR_DOWN_PHY; |
1707 | reg |= EXT_PWR_DN_EN_LD; |
1708 | } |
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
1711 | break; |
1712 | |
1713 | case GENET_POWER_CABLE_SENSE: |
1714 | /* enable APD */ |
1715 | if (!GENET_IS_V5(priv)) { |
1716 | reg |= EXT_PWR_DN_EN_LD; |
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1718 | } |
1719 | break; |
1720 | case GENET_POWER_WOL_MAGIC: |
1721 | bcmgenet_wol_power_up_cfg(priv, mode); |
1722 | return; |
1723 | default: |
1724 | break; |
1725 | } |
1726 | } |
1727 | |
1728 | static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, |
1729 | struct bcmgenet_tx_ring *ring) |
1730 | { |
1731 | struct enet_cb *tx_cb_ptr; |
1732 | |
1733 | tx_cb_ptr = ring->cbs; |
1734 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; |
1735 | |
1736 | /* Advancing local write pointer */ |
1737 | if (ring->write_ptr == ring->end_ptr) |
1738 | ring->write_ptr = ring->cb_ptr; |
1739 | else |
1740 | ring->write_ptr++; |
1741 | |
1742 | return tx_cb_ptr; |
1743 | } |
1744 | |
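/* Undo the effect of a prior bcmgenet_get_txcb(); used by the xmit error
 * path to unwind partially mapped transmits. For example, if mapping
 * fragment 2 of 3 fails, repeated put calls step the write pointer back
 * over the control blocks already taken so their DMA mappings can be
 * released.
 */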
1745 | static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, |
1746 | struct bcmgenet_tx_ring *ring) |
1747 | { |
1748 | struct enet_cb *tx_cb_ptr; |
1749 | |
1750 | tx_cb_ptr = ring->cbs; |
1751 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; |
1752 | |
1753 | /* Rewinding local write pointer */ |
1754 | if (ring->write_ptr == ring->cb_ptr) |
1755 | ring->write_ptr = ring->end_ptr; |
1756 | else |
1757 | ring->write_ptr--; |
1758 | |
1759 | return tx_cb_ptr; |
1760 | } |
1761 | |
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
1811 | |
1812 | /* Simple helper to free a transmit control block's resources |
1813 | * Returns an skb when the last transmit control block associated with the |
1814 | * skb is freed. The skb should be freed by the caller if necessary. |
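 *
 * For example, an skb with two fragments occupies three control blocks:
 * the calls that free the head and the first fragment return NULL, and
 * only the call that frees last_cb hands the skb back to the caller.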
1815 | */ |
1816 | static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, |
1817 | struct enet_cb *cb) |
1818 | { |
1819 | struct sk_buff *skb; |
1820 | |
1821 | skb = cb->skb; |
1822 | |
1823 | if (skb) { |
1824 | cb->skb = NULL; |
1825 | if (cb == GENET_CB(skb)->first_cb) |
1826 | dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), |
1827 | dma_unmap_len(cb, dma_len), |
1828 | DMA_TO_DEVICE); |
1829 | else |
1830 | dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), |
1831 | dma_unmap_len(cb, dma_len), |
1832 | DMA_TO_DEVICE); |
1833 | dma_unmap_addr_set(cb, dma_addr, 0); |
1834 | |
1835 | if (cb == GENET_CB(skb)->last_cb) |
1836 | return skb; |
1837 | |
1838 | } else if (dma_unmap_addr(cb, dma_addr)) { |
1839 | dma_unmap_page(dev, |
1840 | dma_unmap_addr(cb, dma_addr), |
1841 | dma_unmap_len(cb, dma_len), |
1842 | DMA_TO_DEVICE); |
1843 | dma_unmap_addr_set(cb, dma_addr, 0); |
1844 | } |
1845 | |
1846 | return NULL; |
1847 | } |
1848 | |
1849 | /* Simple helper to free a receive control block's resources */ |
1850 | static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, |
1851 | struct enet_cb *cb) |
1852 | { |
1853 | struct sk_buff *skb; |
1854 | |
1855 | skb = cb->skb; |
1856 | cb->skb = NULL; |
1857 | |
1858 | if (dma_unmap_addr(cb, dma_addr)) { |
1859 | dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), |
1860 | dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); |
1861 | dma_unmap_addr_set(cb, dma_addr, 0); |
1862 | } |
1863 | |
1864 | return skb; |
1865 | } |
1866 | |
1867 | /* Unlocked version of the reclaim routine */ |
1868 | static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
1869 | struct bcmgenet_tx_ring *ring) |
1870 | { |
1871 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1872 | unsigned int txbds_processed = 0; |
1873 | unsigned int bytes_compl = 0; |
1874 | unsigned int pkts_compl = 0; |
1875 | unsigned int txbds_ready; |
1876 | unsigned int c_index; |
1877 | struct sk_buff *skb; |
1878 | |
1879 | /* Clear status before servicing to reduce spurious interrupts */ |
1880 | if (ring->index == DESC_INDEX) |
1881 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, |
1882 | INTRL2_CPU_CLEAR); |
1883 | else |
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1885 | INTRL2_CPU_CLEAR); |
1886 | |
1887 | /* Compute how many buffers are transmitted since last xmit call */ |
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1889 | & DMA_C_INDEX_MASK; |
1890 | txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; |
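
	/* The masked subtraction stays correct across consumer index
	 * wraparound, e.g. ring->c_index == 0xfffe and c_index == 0x0002
	 * yields txbds_ready == 4 (a worked example, assuming
	 * DMA_C_INDEX_MASK is 0xffff).
	 */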
1891 | |
	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
1895 | |
1896 | /* Reclaim transmitted buffers */ |
1897 | while (txbds_processed < txbds_ready) { |
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
1900 | if (skb) { |
1901 | pkts_compl++; |
1902 | bytes_compl += GENET_CB(skb)->bytes_sent; |
1903 | dev_consume_skb_any(skb); |
1904 | } |
1905 | |
1906 | txbds_processed++; |
1907 | if (likely(ring->clean_ptr < ring->end_ptr)) |
1908 | ring->clean_ptr++; |
1909 | else |
1910 | ring->clean_ptr = ring->cb_ptr; |
1911 | } |
1912 | |
1913 | ring->free_bds += txbds_processed; |
1914 | ring->c_index = c_index; |
1915 | |
1916 | ring->packets += pkts_compl; |
1917 | ring->bytes += bytes_compl; |
1918 | |
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);
1921 | |
1922 | return txbds_processed; |
1923 | } |
1924 | |
1925 | static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, |
1926 | struct bcmgenet_tx_ring *ring) |
1927 | { |
1928 | unsigned int released; |
1929 | |
	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);
1933 | |
1934 | return released; |
1935 | } |
1936 | |
1937 | static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) |
1938 | { |
1939 | struct bcmgenet_tx_ring *ring = |
1940 | container_of(napi, struct bcmgenet_tx_ring, napi); |
1941 | unsigned int work_done = 0; |
1942 | struct netdev_queue *txq; |
1943 | |
	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);
1951 | |
1952 | if (work_done == 0) { |
		napi_complete(napi);
1954 | ring->int_enable(ring); |
1955 | |
1956 | return 0; |
1957 | } |
1958 | |
1959 | return budget; |
1960 | } |
1961 | |
1962 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) |
1963 | { |
1964 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1965 | int i; |
1966 | |
1967 | if (netif_is_multiqueue(dev)) { |
1968 | for (i = 0; i < priv->hw_params->tx_queues; i++) |
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1970 | } |
1971 | |
	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1973 | } |
1974 | |
1975 | /* Reallocate the SKB to put enough headroom in front of it and insert |
1976 | * the transmit checksum offsets in the descriptors |
1977 | */ |
1978 | static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, |
1979 | struct sk_buff *skb) |
1980 | { |
1981 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1982 | struct status_64 *status = NULL; |
1983 | struct sk_buff *new_skb; |
1984 | u16 offset; |
1985 | u8 ip_proto; |
1986 | __be16 ip_ver; |
1987 | u32 tx_csum_info; |
1988 | |
1989 | if (unlikely(skb_headroom(skb) < sizeof(*status))) { |
1990 | /* If 64 byte status block enabled, must make sure skb has |
1991 | * enough headroom for us to insert 64B status block. |
1992 | */ |
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
1994 | if (!new_skb) { |
1995 | dev_kfree_skb_any(skb); |
1996 | priv->mib.tx_realloc_tsb_failed++; |
1997 | dev->stats.tx_dropped++; |
1998 | return NULL; |
1999 | } |
2000 | dev_consume_skb_any(skb); |
2001 | skb = new_skb; |
2002 | priv->mib.tx_realloc_tsb++; |
2003 | } |
2004 | |
	skb_push(skb, sizeof(*status));
2006 | status = (struct status_64 *)skb->data; |
2007 | |
2008 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2009 | ip_ver = skb->protocol; |
2010 | switch (ip_ver) { |
2011 | case htons(ETH_P_IP): |
2012 | ip_proto = ip_hdr(skb)->protocol; |
2013 | break; |
2014 | case htons(ETH_P_IPV6): |
2015 | ip_proto = ipv6_hdr(skb)->nexthdr; |
2016 | break; |
2017 | default: |
2018 | /* don't use UDP flag */ |
2019 | ip_proto = 0; |
2020 | break; |
2021 | } |
2022 | |
2023 | offset = skb_checksum_start_offset(skb) - sizeof(*status); |
2024 | tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | |
2025 | (offset + skb->csum_offset) | |
2026 | STATUS_TX_CSUM_LV; |
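
		/* skb_push() above grew the headroom by sizeof(*status), so
		 * subtracting it yields the checksum start relative to the
		 * Ethernet header, e.g. 34 for TCP over IPv4 (14B MAC +
		 * 20B IP) -- a worked example, not an exhaustive case list.
		 */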
2027 | |
2028 | /* Set the special UDP flag for UDP */ |
2029 | if (ip_proto == IPPROTO_UDP) |
2030 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; |
2031 | |
2032 | status->tx_csum_info = tx_csum_info; |
2033 | } |
2034 | |
2035 | return skb; |
2036 | } |
2037 | |
2038 | static void bcmgenet_hide_tsb(struct sk_buff *skb) |
2039 | { |
	__skb_pull(skb, sizeof(struct status_64));
2041 | } |
2042 | |
2043 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) |
2044 | { |
2045 | struct bcmgenet_priv *priv = netdev_priv(dev); |
2046 | struct device *kdev = &priv->pdev->dev; |
2047 | struct bcmgenet_tx_ring *ring = NULL; |
2048 | struct enet_cb *tx_cb_ptr; |
2049 | struct netdev_queue *txq; |
2050 | int nr_frags, index; |
2051 | dma_addr_t mapping; |
2052 | unsigned int size; |
2053 | skb_frag_t *frag; |
2054 | u32 len_stat; |
2055 | int ret; |
2056 | int i; |
2057 | |
2058 | index = skb_get_queue_mapping(skb); |
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
2066 | if (index == 0) |
2067 | index = DESC_INDEX; |
2068 | else |
2069 | index -= 1; |
2070 | |
2071 | ring = &priv->tx_rings[index]; |
	txq = netdev_get_tx_queue(dev, ring->queue);
2073 | |
2074 | nr_frags = skb_shinfo(skb)->nr_frags; |
2075 | |
	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq))
			netif_tx_stop_queue(txq);
2080 | ret = NETDEV_TX_BUSY; |
2081 | goto out; |
2082 | } |
2083 | |
2084 | /* Retain how many bytes will be sent on the wire, without TSB inserted |
2085 | * by transmit checksum offload |
2086 | */ |
2087 | GENET_CB(skb)->bytes_sent = skb->len; |
2088 | |
2089 | /* add the Transmit Status Block */ |
2090 | skb = bcmgenet_add_tsb(dev, skb); |
2091 | if (!skb) { |
2092 | ret = NETDEV_TX_OK; |
2093 | goto out; |
2094 | } |
2095 | |
2096 | for (i = 0; i <= nr_frags; i++) { |
2097 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); |
2098 | |
2099 | BUG_ON(!tx_cb_ptr); |
2100 | |
2101 | if (!i) { |
2102 | /* Transmit single SKB or head of fragment list */ |
2103 | GENET_CB(skb)->first_cb = tx_cb_ptr; |
2104 | size = skb_headlen(skb); |
2105 | mapping = dma_map_single(kdev, skb->data, size, |
2106 | DMA_TO_DEVICE); |
2107 | } else { |
2108 | /* xmit fragment */ |
2109 | frag = &skb_shinfo(skb)->frags[i - 1]; |
2110 | size = skb_frag_size(frag); |
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
2113 | } |
2114 | |
		ret = dma_mapping_error(kdev, mapping);
2116 | if (ret) { |
2117 | priv->mib.tx_dma_failed++; |
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
2119 | ret = NETDEV_TX_OK; |
2120 | goto out_unmap_frags; |
2121 | } |
2122 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); |
2123 | dma_unmap_len_set(tx_cb_ptr, dma_len, size); |
2124 | |
2125 | tx_cb_ptr->skb = skb; |
2126 | |
2127 | len_stat = (size << DMA_BUFLENGTH_SHIFT) | |
2128 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); |
2129 | |
2130 | /* Note: if we ever change from DMA_TX_APPEND_CRC below we |
2131 | * will need to restore software padding of "runt" packets |
2132 | */ |
2133 | len_stat |= DMA_TX_APPEND_CRC; |
2134 | |
2135 | if (!i) { |
2136 | len_stat |= DMA_SOP; |
2137 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
2138 | len_stat |= DMA_TX_DO_CSUM; |
2139 | } |
2140 | if (i == nr_frags) |
2141 | len_stat |= DMA_EOP; |
2142 | |
		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
2144 | } |
2145 | |
2146 | GENET_CB(skb)->last_cb = tx_cb_ptr; |
2147 | |
2148 | bcmgenet_hide_tsb(skb); |
2149 | skb_tx_timestamp(skb); |
2150 | |
2151 | /* Decrement total BD count and advance our write pointer */ |
2152 | ring->free_bds -= nr_frags + 1; |
2153 | ring->prod_index += nr_frags + 1; |
2154 | ring->prod_index &= DMA_P_INDEX_MASK; |
2155 | |
	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock(&ring->lock);
2167 | |
2168 | return ret; |
2169 | |
2170 | out_unmap_frags: |
2171 | /* Back up for failed control block mapping */ |
2172 | bcmgenet_put_txcb(priv, ring); |
2173 | |
2174 | /* Unmap successfully mapped control blocks */ |
2175 | while (i-- > 0) { |
2176 | tx_cb_ptr = bcmgenet_put_txcb(priv, ring); |
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
2178 | } |
2179 | |
2180 | dev_kfree_skb(skb); |
2181 | goto out; |
2182 | } |
2183 | |
2184 | static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, |
2185 | struct enet_cb *cb) |
2186 | { |
2187 | struct device *kdev = &priv->pdev->dev; |
2188 | struct sk_buff *skb; |
2189 | struct sk_buff *rx_skb; |
2190 | dma_addr_t mapping; |
2191 | |
2192 | /* Allocate a new Rx skb */ |
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
2194 | GFP_ATOMIC | __GFP_NOWARN); |
2195 | if (!skb) { |
2196 | priv->mib.alloc_rx_buff_failed++; |
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
2199 | return NULL; |
2200 | } |
2201 | |
2202 | /* DMA-map the new Rx skb */ |
2203 | mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, |
2204 | DMA_FROM_DEVICE); |
	if (dma_mapping_error(kdev, mapping)) {
2206 | priv->mib.rx_dma_failed++; |
2207 | dev_kfree_skb_any(skb); |
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
2210 | return NULL; |
2211 | } |
2212 | |
2213 | /* Grab the current Rx skb from the ring and DMA-unmap it */ |
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
2215 | |
2216 | /* Put the new Rx skb on the ring */ |
2217 | cb->skb = skb; |
2218 | dma_unmap_addr_set(cb, dma_addr, mapping); |
2219 | dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); |
	dmadesc_set_addr(priv, cb->bd_addr, mapping);
2221 | |
2222 | /* Return the current Rx skb to caller */ |
2223 | return rx_skb; |
2224 | } |
2225 | |
/* bcmgenet_desc_rx - descriptor-based Rx processing.
 * This can be called from the bottom half or from the NAPI polling method.
 */
2229 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, |
2230 | unsigned int budget) |
2231 | { |
2232 | struct bcmgenet_priv *priv = ring->priv; |
2233 | struct net_device *dev = priv->dev; |
2234 | struct enet_cb *cb; |
2235 | struct sk_buff *skb; |
2236 | u32 dma_length_status; |
2237 | unsigned long dma_flag; |
2238 | int len; |
2239 | unsigned int rxpktprocessed = 0, rxpkttoprocess; |
2240 | unsigned int bytes_processed = 0; |
2241 | unsigned int p_index, mask; |
2242 | unsigned int discards; |
2243 | |
2244 | /* Clear status before servicing to reduce spurious interrupts */ |
2245 | if (ring->index == DESC_INDEX) { |
2246 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, |
2247 | INTRL2_CPU_CLEAR); |
2248 | } else { |
2249 | mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); |
		bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
2253 | } |
2254 | |
	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2256 | |
2257 | discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & |
2258 | DMA_P_INDEX_DISCARD_CNT_MASK; |
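
	/* The producer index register also carries a free-running discard
	 * counter in its upper bits; the delta against old_discards below
	 * is the number of packets the hardware dropped since the last
	 * service.
	 */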
2259 | if (discards > ring->old_discards) { |
2260 | discards = discards - ring->old_discards; |
2261 | ring->errors += discards; |
2262 | ring->old_discards += discards; |
2263 | |
2264 | /* Clear HW register when we reach 75% of maximum 0xFFFF */ |
2265 | if (ring->old_discards >= 0xC000) { |
2266 | ring->old_discards = 0; |
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
2269 | } |
2270 | } |
2271 | |
2272 | p_index &= DMA_P_INDEX_MASK; |
2273 | rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; |
2274 | |
	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2277 | |
2278 | while ((rxpktprocessed < rxpkttoprocess) && |
2279 | (rxpktprocessed < budget)) { |
2280 | struct status_64 *status; |
2281 | __be16 rx_csum; |
2282 | |
2283 | cb = &priv->rx_cbs[ring->read_ptr]; |
2284 | skb = bcmgenet_rx_refill(priv, cb); |
2285 | |
2286 | if (unlikely(!skb)) { |
2287 | ring->dropped++; |
2288 | goto next; |
2289 | } |
2290 | |
2291 | status = (struct status_64 *)skb->data; |
2292 | dma_length_status = status->length_status; |
2293 | if (dev->features & NETIF_F_RXCSUM) { |
2294 | rx_csum = (__force __be16)(status->rx_csum & 0xffff); |
2295 | if (rx_csum) { |
2296 | skb->csum = (__force __wsum)ntohs(rx_csum); |
2297 | skb->ip_summed = CHECKSUM_COMPLETE; |
2298 | } |
2299 | } |
2300 | |
2301 | /* DMA flags and length are still valid no matter how |
2302 | * we got the Receive Status Vector (64B RSB or register) |
2303 | */ |
2304 | dma_flag = dma_length_status & 0xffff; |
2305 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; |
2306 | |
		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);
2311 | |
2312 | if (unlikely(len > RX_BUF_LENGTH)) { |
			netif_err(priv, rx_status, dev, "oversized packet\n");
2314 | dev->stats.rx_length_errors++; |
2315 | dev->stats.rx_errors++; |
2316 | dev_kfree_skb_any(skb); |
2317 | goto next; |
2318 | } |
2319 | |
2320 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
2323 | ring->errors++; |
2324 | dev_kfree_skb_any(skb); |
2325 | goto next; |
2326 | } |
2327 | |
2328 | /* report errors */ |
2329 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | |
2330 | DMA_RX_OV | |
2331 | DMA_RX_NO | |
2332 | DMA_RX_LG | |
2333 | DMA_RX_RXER))) { |
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
2336 | if (dma_flag & DMA_RX_CRC_ERROR) |
2337 | dev->stats.rx_crc_errors++; |
2338 | if (dma_flag & DMA_RX_OV) |
2339 | dev->stats.rx_over_errors++; |
2340 | if (dma_flag & DMA_RX_NO) |
2341 | dev->stats.rx_frame_errors++; |
2342 | if (dma_flag & DMA_RX_LG) |
2343 | dev->stats.rx_length_errors++; |
2344 | dev->stats.rx_errors++; |
2345 | dev_kfree_skb_any(skb); |
2346 | goto next; |
2347 | } /* error packet */ |
2348 | |
2349 | skb_put(skb, len); |
2350 | |
		/* remove the 64B RSB plus the 2 bytes of padding the hardware
		 * adds for IP alignment (64 + 2 = 66)
		 */
		skb_pull(skb, 66);
		len -= 66;
2354 | |
2355 | if (priv->crc_fwd_en) { |
			skb_trim(skb, len - ETH_FCS_LEN);
2357 | len -= ETH_FCS_LEN; |
2358 | } |
2359 | |
2360 | bytes_processed += len; |
2361 | |
		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
2364 | ring->packets++; |
2365 | ring->bytes += len; |
2366 | if (dma_flag & DMA_RX_MULT) |
2367 | dev->stats.multicast++; |
2368 | |
2369 | /* Notify kernel */ |
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2372 | |
2373 | next: |
2374 | rxpktprocessed++; |
2375 | if (likely(ring->read_ptr < ring->end_ptr)) |
2376 | ring->read_ptr++; |
2377 | else |
2378 | ring->read_ptr = ring->cb_ptr; |
2379 | |
2380 | ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; |
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index,
					  RDMA_CONS_INDEX);
2382 | } |
2383 | |
2384 | ring->dim.bytes = bytes_processed; |
2385 | ring->dim.packets = rxpktprocessed; |
2386 | |
2387 | return rxpktprocessed; |
2388 | } |
2389 | |
2390 | /* Rx NAPI polling method */ |
2391 | static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) |
2392 | { |
2393 | struct bcmgenet_rx_ring *ring = container_of(napi, |
2394 | struct bcmgenet_rx_ring, napi); |
2395 | struct dim_sample dim_sample = {}; |
2396 | unsigned int work_done; |
2397 | |
2398 | work_done = bcmgenet_desc_rx(ring, budget); |
2399 | |
2400 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
2402 | ring->int_enable(ring); |
2403 | } |
2404 | |
2405 | if (ring->dim.use_dim) { |
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
2409 | } |
2410 | |
2411 | return work_done; |
2412 | } |
2413 | |
2414 | static void bcmgenet_dim_work(struct work_struct *work) |
2415 | { |
2416 | struct dim *dim = container_of(work, struct dim, work); |
2417 | struct bcmgenet_net_dim *ndim = |
2418 | container_of(dim, struct bcmgenet_net_dim, dim); |
2419 | struct bcmgenet_rx_ring *ring = |
2420 | container_of(ndim, struct bcmgenet_rx_ring, dim); |
2421 | struct dim_cq_moder cur_profile = |
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2423 | |
	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2425 | dim->state = DIM_START_MEASURE; |
2426 | } |
2427 | |
/* Assign skbs to Rx DMA descriptors. */
2429 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
2430 | struct bcmgenet_rx_ring *ring) |
2431 | { |
2432 | struct enet_cb *cb; |
2433 | struct sk_buff *skb; |
2434 | int i; |
2435 | |
	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2437 | |
2438 | /* loop here for each buffer needing assign */ |
2439 | for (i = 0; i < ring->size; i++) { |
2440 | cb = ring->cbs + i; |
2441 | skb = bcmgenet_rx_refill(priv, cb); |
2442 | if (skb) |
2443 | dev_consume_skb_any(skb); |
2444 | if (!cb->skb) |
2445 | return -ENOMEM; |
2446 | } |
2447 | |
2448 | return 0; |
2449 | } |
2450 | |
2451 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) |
2452 | { |
2453 | struct sk_buff *skb; |
2454 | struct enet_cb *cb; |
2455 | int i; |
2456 | |
2457 | for (i = 0; i < priv->num_rx_bds; i++) { |
2458 | cb = &priv->rx_cbs[i]; |
2459 | |
		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2461 | if (skb) |
2462 | dev_consume_skb_any(skb); |
2463 | } |
2464 | } |
2465 | |
2466 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
2467 | { |
2468 | u32 reg; |
2469 | |
2470 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
2471 | if (reg & CMD_SW_RESET) |
2472 | return; |
2473 | if (enable) |
2474 | reg |= mask; |
2475 | else |
2476 | reg &= ~mask; |
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2478 | |
2479 | /* UniMAC stops on a packet boundary, wait for a full-size packet |
2480 | * to be processed |
2481 | */ |
2482 | if (enable == 0) |
		usleep_range(1000, 2000);
2484 | } |
2485 | |
2486 | static void reset_umac(struct bcmgenet_priv *priv) |
2487 | { |
2488 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ |
	bcmgenet_rbuf_ctrl_set(priv, 0);
2490 | udelay(10); |
2491 | |
2492 | /* issue soft reset and disable MAC while updating its registers */ |
2493 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); |
2494 | udelay(2); |
2495 | } |
2496 | |
2497 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
2498 | { |
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2504 | } |
2505 | |
2506 | static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) |
2507 | { |
2508 | u32 int0_enable = 0; |
2509 | |
2510 | /* Monitor cable plug/unplugged event for internal PHY, external PHY |
2511 | * and MoCA PHY |
2512 | */ |
2513 | if (priv->internal_phy) { |
2514 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
2515 | if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) |
2516 | int0_enable |= UMAC_IRQ_PHY_DET_R; |
2517 | } else if (priv->ext_phy) { |
2518 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
2519 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
2520 | if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) |
2521 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
2522 | } |
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2524 | } |
2525 | |
2526 | static void init_umac(struct bcmgenet_priv *priv) |
2527 | { |
2528 | struct device *kdev = &priv->pdev->dev; |
2529 | u32 reg; |
2530 | u32 int0_enable = 0; |
2531 | |
	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2533 | |
2534 | reset_umac(priv); |
2535 | |
2536 | /* clear tx/rx counter */ |
2537 | bcmgenet_umac_writel(priv, |
2538 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
2539 | UMAC_MIB_CTRL); |
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2541 | |
2542 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); |
2543 | |
2544 | /* init tx registers, enable TSB */ |
2545 | reg = bcmgenet_tbuf_ctrl_get(priv); |
2546 | reg |= TBUF_64B_EN; |
	bcmgenet_tbuf_ctrl_set(priv, reg);
2548 | |
2549 | /* init rx registers, enable ip header optimization and RSB */ |
2550 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); |
2551 | reg |= RBUF_ALIGN_2B | RBUF_64B_EN; |
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2553 | |
2554 | /* enable rx checksumming */ |
2555 | reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); |
2556 | reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; |
2557 | /* If UniMAC forwards CRC, we need to skip over it to get |
2558 | * a valid CHK bit to be set in the per-packet status word |
2559 | */ |
2560 | if (priv->crc_fwd_en) |
2561 | reg |= RBUF_SKIP_FCS; |
2562 | else |
2563 | reg &= ~RBUF_SKIP_FCS; |
	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2568 | |
2569 | bcmgenet_intr_disable(priv); |
2570 | |
2571 | /* Configure backpressure vectors for MoCA */ |
2572 | if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
2573 | reg = bcmgenet_bp_mc_get(priv); |
2574 | reg |= BIT(priv->hw_params->bp_in_en_shift); |
2575 | |
2576 | /* bp_mask: back pressure mask */ |
		if (netif_is_multiqueue(priv->dev))
2578 | reg |= priv->hw_params->bp_in_mask; |
2579 | else |
2580 | reg &= ~priv->hw_params->bp_in_mask; |
		bcmgenet_bp_mc_set(priv, reg);
2582 | } |
2583 | |
2584 | /* Enable MDIO interrupts on GENET v3+ */ |
2585 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) |
2586 | int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
2587 | |
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");
2591 | } |
2592 | |
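/* Set up the dynamic interrupt moderation (DIM) state for an Rx ring;
 * bcmgenet_rx_poll() feeds packet/byte samples to net_dim(), which in
 * turn schedules @cb to apply the new coalescing profile.
 */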
2593 | static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, |
2594 | void (*cb)(struct work_struct *work)) |
2595 | { |
2596 | struct bcmgenet_net_dim *dim = &ring->dim; |
2597 | |
2598 | INIT_WORK(&dim->dim.work, cb); |
2599 | dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
2600 | dim->event_ctr = 0; |
2601 | dim->packets = 0; |
2602 | dim->bytes = 0; |
2603 | } |
2604 | |
2605 | static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) |
2606 | { |
2607 | struct bcmgenet_net_dim *dim = &ring->dim; |
2608 | struct dim_cq_moder moder; |
2609 | u32 usecs, pkts; |
2610 | |
2611 | usecs = ring->rx_coalesce_usecs; |
2612 | pkts = ring->rx_max_coalesced_frames; |
2613 | |
2614 | /* If DIM was enabled, re-apply default parameters */ |
2615 | if (dim->use_dim) { |
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2617 | usecs = moder.usec; |
2618 | pkts = moder.pkts; |
2619 | } |
2620 | |
2621 | bcmgenet_set_rx_coalesce(ring, usecs, pkts); |
2622 | } |
2623 | |
2624 | /* Initialize a Tx ring along with corresponding hardware registers */ |
2625 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
2626 | unsigned int index, unsigned int size, |
2627 | unsigned int start_ptr, unsigned int end_ptr) |
2628 | { |
2629 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; |
2630 | u32 words_per_bd = WORDS_PER_BD(priv); |
2631 | u32 flow_period_val = 0; |
2632 | |
2633 | spin_lock_init(&ring->lock); |
2634 | ring->priv = priv; |
2635 | ring->index = index; |
2636 | if (index == DESC_INDEX) { |
2637 | ring->queue = 0; |
2638 | ring->int_enable = bcmgenet_tx_ring16_int_enable; |
2639 | ring->int_disable = bcmgenet_tx_ring16_int_disable; |
2640 | } else { |
2641 | ring->queue = index + 1; |
2642 | ring->int_enable = bcmgenet_tx_ring_int_enable; |
2643 | ring->int_disable = bcmgenet_tx_ring_int_disable; |
2644 | } |
2645 | ring->cbs = priv->tx_cbs + start_ptr; |
2646 | ring->size = size; |
2647 | ring->clean_ptr = start_ptr; |
2648 | ring->c_index = 0; |
2649 | ring->free_bds = size; |
2650 | ring->write_ptr = start_ptr; |
2651 | ring->cb_ptr = start_ptr; |
2652 | ring->end_ptr = end_ptr - 1; |
2653 | ring->prod_index = 0; |
2654 | |
2655 | /* Set flow period for ring != 16 */ |
2656 | if (index != DESC_INDEX) |
2657 | flow_period_val = ENET_MAX_MTU_SIZE << 16; |
2658 | |
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	/* Initialize Tx NAPI */
	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
2681 | } |
2682 | |
2683 | /* Initialize a RDMA ring */ |
2684 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, |
2685 | unsigned int index, unsigned int size, |
2686 | unsigned int start_ptr, unsigned int end_ptr) |
2687 | { |
2688 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
2689 | u32 words_per_bd = WORDS_PER_BD(priv); |
2690 | int ret; |
2691 | |
2692 | ring->priv = priv; |
2693 | ring->index = index; |
2694 | if (index == DESC_INDEX) { |
2695 | ring->int_enable = bcmgenet_rx_ring16_int_enable; |
2696 | ring->int_disable = bcmgenet_rx_ring16_int_disable; |
2697 | } else { |
2698 | ring->int_enable = bcmgenet_rx_ring_int_enable; |
2699 | ring->int_disable = bcmgenet_rx_ring_int_disable; |
2700 | } |
2701 | ring->cbs = priv->rx_cbs + start_ptr; |
2702 | ring->size = size; |
2703 | ring->c_index = 0; |
2704 | ring->read_ptr = start_ptr; |
2705 | ring->cb_ptr = start_ptr; |
2706 | ring->end_ptr = end_ptr - 1; |
2707 | |
2708 | ret = bcmgenet_alloc_rx_buffers(priv, ring); |
2709 | if (ret) |
2710 | return ret; |
2711 | |
	bcmgenet_init_dim(ring, bcmgenet_dim_work);
2713 | bcmgenet_init_rx_coalesce(ring); |
2714 | |
2715 | /* Initialize Rx NAPI */ |
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
2737 | |
2738 | return ret; |
2739 | } |
2740 | |
2741 | static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) |
2742 | { |
2743 | unsigned int i; |
2744 | struct bcmgenet_tx_ring *ring; |
2745 | |
2746 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { |
2747 | ring = &priv->tx_rings[i]; |
		napi_enable(&ring->napi);
2749 | ring->int_enable(ring); |
2750 | } |
2751 | |
2752 | ring = &priv->tx_rings[DESC_INDEX]; |
	napi_enable(&ring->napi);
2754 | ring->int_enable(ring); |
2755 | } |
2756 | |
2757 | static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) |
2758 | { |
2759 | unsigned int i; |
2760 | struct bcmgenet_tx_ring *ring; |
2761 | |
2762 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { |
2763 | ring = &priv->tx_rings[i]; |
		napi_disable(&ring->napi);
2765 | } |
2766 | |
2767 | ring = &priv->tx_rings[DESC_INDEX]; |
	napi_disable(&ring->napi);
2769 | } |
2770 | |
2771 | static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) |
2772 | { |
2773 | unsigned int i; |
2774 | struct bcmgenet_tx_ring *ring; |
2775 | |
2776 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { |
2777 | ring = &priv->tx_rings[i]; |
		netif_napi_del(&ring->napi);
2779 | } |
2780 | |
2781 | ring = &priv->tx_rings[DESC_INDEX]; |
	netif_napi_del(&ring->napi);
2783 | } |
2784 | |
2785 | /* Initialize Tx queues |
2786 | * |
2787 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
2788 | * with queue 0 being the highest priority queue. |
2789 | * |
2790 | * Queue 16 is the default Tx queue with |
2791 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
2792 | * |
2793 | * The transmit control block pool is then partitioned as follows: |
2794 | * - Tx queue 0 uses tx_cbs[0..31] |
2795 | * - Tx queue 1 uses tx_cbs[32..63] |
2796 | * - Tx queue 2 uses tx_cbs[64..95] |
2797 | * - Tx queue 3 uses tx_cbs[96..127] |
2798 | * - Tx queue 16 uses tx_cbs[128..255] |
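 *
 * Each queue is also assigned a strict priority below; the per-queue
 * priority fields are packed into the DMA_PRIORITY_{0,1,2} registers
 * via DMA_PRIO_REG_INDEX() and DMA_PRIO_REG_SHIFT().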
2799 | */ |
2800 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
2801 | { |
2802 | struct bcmgenet_priv *priv = netdev_priv(dev); |
2803 | u32 i, dma_enable; |
2804 | u32 dma_ctrl, ring_cfg; |
2805 | u32 dma_priority[3] = {0, 0, 0}; |
2806 | |
	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2811 | |
2812 | dma_ctrl = 0; |
2813 | ring_cfg = 0; |
2814 | |
2815 | /* Enable strict priority arbiter mode */ |
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2817 | |
2818 | /* Initialize Tx priority queues */ |
2819 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
2823 | ring_cfg |= (1 << i); |
2824 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
2825 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
2826 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); |
2827 | } |
2828 | |
2829 | /* Initialize Tx default queue 16 */ |
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
2834 | ring_cfg |= (1 << DESC_INDEX); |
2835 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); |
2836 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
2837 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << |
2838 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); |
2839 | |
2840 | /* Set Tx queue priorities */ |
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2844 | |
2845 | /* Enable Tx queues */ |
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2847 | |
2848 | /* Enable Tx DMA */ |
2849 | if (dma_enable) |
2850 | dma_ctrl |= DMA_EN; |
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2852 | } |
2853 | |
2854 | static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) |
2855 | { |
2856 | unsigned int i; |
2857 | struct bcmgenet_rx_ring *ring; |
2858 | |
2859 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { |
2860 | ring = &priv->rx_rings[i]; |
		napi_enable(&ring->napi);
2862 | ring->int_enable(ring); |
2863 | } |
2864 | |
2865 | ring = &priv->rx_rings[DESC_INDEX]; |
	napi_enable(&ring->napi);
2867 | ring->int_enable(ring); |
2868 | } |
2869 | |
2870 | static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) |
2871 | { |
2872 | unsigned int i; |
2873 | struct bcmgenet_rx_ring *ring; |
2874 | |
2875 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { |
2876 | ring = &priv->rx_rings[i]; |
		napi_disable(&ring->napi);
		cancel_work_sync(&ring->dim.dim.work);
2879 | } |
2880 | |
2881 | ring = &priv->rx_rings[DESC_INDEX]; |
	napi_disable(&ring->napi);
	cancel_work_sync(&ring->dim.dim.work);
2884 | } |
2885 | |
2886 | static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) |
2887 | { |
2888 | unsigned int i; |
2889 | struct bcmgenet_rx_ring *ring; |
2890 | |
2891 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { |
2892 | ring = &priv->rx_rings[i]; |
		netif_napi_del(&ring->napi);
2894 | } |
2895 | |
2896 | ring = &priv->rx_rings[DESC_INDEX]; |
	netif_napi_del(&ring->napi);
2898 | } |
2899 | |
2900 | /* Initialize Rx queues |
2901 | * |
2902 | * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be |
2903 | * used to direct traffic to these queues. |
2904 | * |
2905 | * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. |
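 * (TOTAL_DESC minus the descriptors claimed by the priority queues).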
2906 | */ |
2907 | static int bcmgenet_init_rx_queues(struct net_device *dev) |
2908 | { |
2909 | struct bcmgenet_priv *priv = netdev_priv(dev); |
2910 | u32 i; |
2911 | u32 dma_enable; |
2912 | u32 dma_ctrl; |
2913 | u32 ring_cfg; |
2914 | int ret; |
2915 | |
	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2920 | |
2921 | dma_ctrl = 0; |
2922 | ring_cfg = 0; |
2923 | |
2924 | /* Initialize Rx priority queues */ |
2925 | for (i = 0; i < priv->hw_params->rx_queues; i++) { |
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
2931 | if (ret) |
2932 | return ret; |
2933 | |
2934 | ring_cfg |= (1 << i); |
2935 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
2936 | } |
2937 | |
2938 | /* Initialize Rx default queue 16 */ |
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
2943 | if (ret) |
2944 | return ret; |
2945 | |
2946 | ring_cfg |= (1 << DESC_INDEX); |
2947 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); |
2948 | |
2949 | /* Enable rings */ |
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2951 | |
2952 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ |
2953 | if (dma_enable) |
2954 | dma_ctrl |= DMA_EN; |
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2956 | |
2957 | return 0; |
2958 | } |
2959 | |
2960 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
2961 | { |
2962 | int ret = 0; |
2963 | int timeout = 0; |
2964 | u32 reg; |
2965 | u32 dma_ctrl; |
2966 | int i; |
2967 | |
	/* Disable TDMA so that no more frames are added to the Tx DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2972 | |
2973 | /* Check TDMA status register to confirm TDMA is disabled */ |
2974 | while (timeout++ < DMA_TIMEOUT_VAL) { |
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2976 | if (reg & DMA_DISABLED) |
2977 | break; |
2978 | |
2979 | udelay(1); |
2980 | } |
2981 | |
2982 | if (timeout == DMA_TIMEOUT_VAL) { |
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2984 | ret = -ETIMEDOUT; |
2985 | } |
2986 | |
2987 | /* Wait 10ms for packet drain in both tx and rx dma */ |
	usleep_range(10000, 20000);
2989 | |
2990 | /* Disable RDMA */ |
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2994 | |
2995 | timeout = 0; |
2996 | /* Check RDMA status register to confirm RDMA is disabled */ |
2997 | while (timeout++ < DMA_TIMEOUT_VAL) { |
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2999 | if (reg & DMA_DISABLED) |
3000 | break; |
3001 | |
3002 | udelay(1); |
3003 | } |
3004 | |
3005 | if (timeout == DMA_TIMEOUT_VAL) { |
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
3007 | ret = -ETIMEDOUT; |
3008 | } |
3009 | |
3010 | dma_ctrl = 0; |
3011 | for (i = 0; i < priv->hw_params->rx_queues; i++) |
3012 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3016 | |
3017 | dma_ctrl = 0; |
3018 | for (i = 0; i < priv->hw_params->tx_queues; i++) |
3019 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3023 | |
3024 | return ret; |
3025 | } |
3026 | |
3027 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
3028 | { |
3029 | struct netdev_queue *txq; |
3030 | int i; |
3031 | |
3032 | bcmgenet_fini_rx_napi(priv); |
3033 | bcmgenet_fini_tx_napi(priv); |
3034 | |
3035 | for (i = 0; i < priv->num_tx_bds; i++) |
3036 | dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, |
3037 | priv->tx_cbs + i)); |
3038 | |
3039 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
		netdev_tx_reset_queue(txq);
3042 | } |
3043 | |
	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	netdev_tx_reset_queue(txq);
3046 | |
3047 | bcmgenet_free_rx_buffers(priv); |
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
3050 | } |
3051 | |
/* bcmgenet_init_dma: initialize the DMA control registers and the Tx/Rx queues */
3053 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) |
3054 | { |
3055 | int ret; |
3056 | unsigned int i; |
3057 | struct enet_cb *cb; |
3058 | |
	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
3060 | |
3061 | /* Initialize common Rx ring structures */ |
3062 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; |
3063 | priv->num_rx_bds = TOTAL_DESC; |
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
3066 | if (!priv->rx_cbs) |
3067 | return -ENOMEM; |
3068 | |
3069 | for (i = 0; i < priv->num_rx_bds; i++) { |
3070 | cb = priv->rx_cbs + i; |
3071 | cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; |
3072 | } |
3073 | |
3074 | /* Initialize common TX ring structures */ |
3075 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
3076 | priv->num_tx_bds = TOTAL_DESC; |
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
3079 | if (!priv->tx_cbs) { |
		kfree(priv->rx_cbs);
3081 | return -ENOMEM; |
3082 | } |
3083 | |
3084 | for (i = 0; i < priv->num_tx_bds; i++) { |
3085 | cb = priv->tx_cbs + i; |
3086 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; |
3087 | } |
3088 | |
3089 | /* Init rDma */ |
	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);
3092 | |
3093 | /* Initialize Rx queues */ |
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
3100 | return ret; |
3101 | } |
3102 | |
3103 | /* Init tDma */ |
	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);
3106 | |
3107 | /* Initialize Tx queues */ |
	bcmgenet_init_tx_queues(priv->dev);
3109 | |
3110 | return 0; |
3111 | } |
3112 | |
3113 | /* Interrupt bottom half */ |
3114 | static void bcmgenet_irq_task(struct work_struct *work) |
3115 | { |
3116 | unsigned int status; |
3117 | struct bcmgenet_priv *priv = container_of( |
3118 | work, struct bcmgenet_priv, bcmgenet_irq_work); |
3119 | |
	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	spin_lock_irq(&priv->lock);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irq(&priv->lock);
3126 | |
3127 | if (status & UMAC_IRQ_PHY_DET_R && |
3128 | priv->dev->phydev->autoneg != AUTONEG_ENABLE) { |
		phy_init_hw(priv->dev->phydev);
		genphy_config_aneg(priv->dev->phydev);
3131 | } |
3132 | |
3133 | /* Link UP/DOWN event */ |
3134 | if (status & UMAC_IRQ_LINK_EVENT) |
		phy_mac_interrupt(priv->dev->phydev);
}
3138 | |
3139 | /* bcmgenet_isr1: handle Rx and Tx priority queues */ |
3140 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
3141 | { |
3142 | struct bcmgenet_priv *priv = dev_id; |
3143 | struct bcmgenet_rx_ring *rx_ring; |
3144 | struct bcmgenet_tx_ring *tx_ring; |
3145 | unsigned int index, status; |
3146 | |
3147 | /* Read irq status */ |
3148 | status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & |
3149 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
3150 | |
3151 | /* clear interrupts */ |
	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, status);
3156 | |
3157 | /* Check Rx priority queue interrupts */ |
3158 | for (index = 0; index < priv->hw_params->rx_queues; index++) { |
3159 | if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) |
3160 | continue; |
3161 | |
3162 | rx_ring = &priv->rx_rings[index]; |
3163 | rx_ring->dim.event_ctr++; |
3164 | |
3165 | if (likely(napi_schedule_prep(&rx_ring->napi))) { |
3166 | rx_ring->int_disable(rx_ring); |
			__napi_schedule_irqoff(&rx_ring->napi);
3168 | } |
3169 | } |
3170 | |
3171 | /* Check Tx priority queue interrupts */ |
3172 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
3173 | if (!(status & BIT(index))) |
3174 | continue; |
3175 | |
3176 | tx_ring = &priv->tx_rings[index]; |
3177 | |
3178 | if (likely(napi_schedule_prep(&tx_ring->napi))) { |
3179 | tx_ring->int_disable(tx_ring); |
			__napi_schedule_irqoff(&tx_ring->napi);
3181 | } |
3182 | } |
3183 | |
3184 | return IRQ_HANDLED; |
3185 | } |
3186 | |
3187 | /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ |
3188 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
3189 | { |
3190 | struct bcmgenet_priv *priv = dev_id; |
3191 | struct bcmgenet_rx_ring *rx_ring; |
3192 | struct bcmgenet_tx_ring *tx_ring; |
3193 | unsigned int status; |
3194 | unsigned long flags; |
3195 | |
3196 | /* Read irq status */ |
3197 | status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & |
3198 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); |
3199 | |
3200 | /* clear interrupts */ |
	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", status);
3205 | |
3206 | if (status & UMAC_IRQ_RXDMA_DONE) { |
3207 | rx_ring = &priv->rx_rings[DESC_INDEX]; |
3208 | rx_ring->dim.event_ctr++; |
3209 | |
3210 | if (likely(napi_schedule_prep(&rx_ring->napi))) { |
3211 | rx_ring->int_disable(rx_ring); |
			__napi_schedule_irqoff(&rx_ring->napi);
3213 | } |
3214 | } |
3215 | |
3216 | if (status & UMAC_IRQ_TXDMA_DONE) { |
3217 | tx_ring = &priv->tx_rings[DESC_INDEX]; |
3218 | |
3219 | if (likely(napi_schedule_prep(&tx_ring->napi))) { |
3220 | tx_ring->int_disable(tx_ring); |
			__napi_schedule_irqoff(&tx_ring->napi);
3222 | } |
3223 | } |
3224 | |
3225 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && |
3226 | status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
3227 | wake_up(&priv->wq); |
3228 | } |
3229 | |
	/* All other interrupts of interest are handled in the bottom half */
3231 | status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); |
3232 | if (status) { |
3233 | /* Save irq status for bottom-half processing. */ |
3234 | spin_lock_irqsave(&priv->lock, flags); |
3235 | priv->irq0_stat |= status; |
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->bcmgenet_irq_work);
3239 | } |
3240 | |
3241 | return IRQ_HANDLED; |
3242 | } |
3243 | |
3244 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
3245 | { |
3246 | /* Acknowledge the interrupt */ |
3247 | return IRQ_HANDLED; |
3248 | } |
3249 | |
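/* Toggle BIT(1) of the RBUF flush control register; given the
 * umac_sw_rst note in reset_umac() above, a reasonable reading is that
 * this pulses the UniMAC software reset.
 */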
3250 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
3251 | { |
3252 | u32 reg; |
3253 | |
3254 | reg = bcmgenet_rbuf_ctrl_get(priv); |
3255 | reg |= BIT(1); |
	bcmgenet_rbuf_ctrl_set(priv, reg);
3257 | udelay(10); |
3258 | |
3259 | reg &= ~BIT(1); |
	bcmgenet_rbuf_ctrl_set(priv, reg);
3261 | udelay(10); |
3262 | } |
3263 | |
3264 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, |
3265 | const unsigned char *addr) |
3266 | { |
	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3269 | } |
3270 | |
3271 | static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, |
3272 | unsigned char *addr) |
3273 | { |
3274 | u32 addr_tmp; |
3275 | |
3276 | addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); |
	put_unaligned_be32(addr_tmp, &addr[0]);
	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
	put_unaligned_be16(addr_tmp, &addr[4]);
3280 | } |
3281 | |
3282 | /* Returns a reusable dma control register value */ |
3283 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx) |
3284 | { |
3285 | unsigned int i; |
3286 | u32 reg; |
3287 | u32 dma_ctrl; |
3288 | |
3289 | /* disable DMA */ |
3290 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; |
3291 | for (i = 0; i < priv->hw_params->tx_queues; i++) |
3292 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3296 | |
3297 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; |
3298 | for (i = 0; i < priv->hw_params->rx_queues; i++) |
3299 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3303 | |
	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3307 | |
3308 | if (flush_rx) { |
3309 | reg = bcmgenet_rbuf_ctrl_get(priv); |
		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
		udelay(10);
		bcmgenet_rbuf_ctrl_set(priv, reg);
3313 | udelay(10); |
3314 | } |
3315 | |
3316 | return dma_ctrl; |
3317 | } |
3318 | |
3319 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) |
3320 | { |
3321 | u32 reg; |
3322 | |
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3330 | } |
3331 | |
3332 | static void bcmgenet_netif_start(struct net_device *dev) |
3333 | { |
3334 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3335 | |
3336 | /* Start the network engine */ |
3337 | bcmgenet_set_rx_mode(dev); |
3338 | bcmgenet_enable_rx_napi(priv); |
3339 | |
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3341 | |
3342 | bcmgenet_enable_tx_napi(priv); |
3343 | |
3344 | /* Monitor link interrupts now */ |
3345 | bcmgenet_link_intr_enable(priv); |
3346 | |
	phy_start(dev->phydev);
3348 | } |
3349 | |
3350 | static int bcmgenet_open(struct net_device *dev) |
3351 | { |
3352 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3353 | unsigned long dma_ctrl; |
3354 | int ret; |
3355 | |
	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3357 | |
3358 | /* Turn on the clock */ |
	clk_prepare_enable(priv->clk);
3360 | |
3361 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
3362 | * brought out of reset as absolutely no UniMAC activity is allowed |
3363 | */ |
3364 | if (priv->internal_phy) |
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3366 | |
3367 | /* take MAC out of reset */ |
3368 | bcmgenet_umac_reset(priv); |
3369 | |
3370 | init_umac(priv); |
3371 | |
3372 | /* Apply features again in case we changed them while interface was |
3373 | * down |
3374 | */ |
	bcmgenet_set_features(dev, dev->features);
3376 | |
	bcmgenet_set_hw_addr(priv, dev->dev_addr);
3378 | |
3379 | /* Disable RX/TX DMA and flush TX and RX queues */ |
	dma_ctrl = bcmgenet_dma_disable(priv, true);
3381 | |
3382 | /* Reinitialize TDMA and RDMA and SW housekeeping */ |
3383 | ret = bcmgenet_init_dma(priv); |
3384 | if (ret) { |
3385 | netdev_err(dev, format: "failed to initialize DMA\n" ); |
3386 | goto err_clk_disable; |
3387 | } |
3388 | |
3389 | /* Always enable ring 16 - descriptor ring */ |
3390 | bcmgenet_enable_dma(priv, dma_ctrl); |
3391 | |
3392 | /* HFB init */ |
3393 | bcmgenet_hfb_init(priv); |
3394 | |
	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}
3401 | |
	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}
3408 | |
3409 | ret = bcmgenet_mii_probe(dev); |
3410 | if (ret) { |
3411 | netdev_err(dev, format: "failed to connect to PHY\n" ); |
3412 | goto err_irq1; |
3413 | } |
3414 | |
	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
3416 | |
3417 | bcmgenet_netif_start(dev); |
3418 | |
3419 | netif_tx_start_all_queues(dev); |
3420 | |
3421 | return 0; |
3422 | |
3423 | err_irq1: |
3424 | free_irq(priv->irq1, priv); |
3425 | err_irq0: |
3426 | free_irq(priv->irq0, priv); |
3427 | err_fini_dma: |
3428 | bcmgenet_dma_teardown(priv); |
3429 | bcmgenet_fini_dma(priv); |
3430 | err_clk_disable: |
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
3434 | return ret; |
3435 | } |
3436 | |
3437 | static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) |
3438 | { |
3439 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3440 | |
3441 | bcmgenet_disable_tx_napi(priv); |
3442 | netif_tx_disable(dev); |
3443 | |
3444 | /* Disable MAC receive */ |
	umac_enable_set(priv, CMD_RX_EN, false);

	bcmgenet_dma_teardown(priv);

	/* Disable MAC transmit; TX DMA must already be disabled at this point */
	umac_enable_set(priv, CMD_TX_EN, false);

	if (stop_phy)
		phy_stop(dev->phydev);
3454 | bcmgenet_disable_rx_napi(priv); |
3455 | bcmgenet_intr_disable(priv); |
3456 | |
3457 | /* Wait for pending work items to complete. Since interrupts are |
3458 | * disabled no new work will be scheduled. |
3459 | */ |
	cancel_work_sync(&priv->bcmgenet_irq_work);
3461 | |
3462 | /* tx reclaim */ |
3463 | bcmgenet_tx_reclaim_all(dev); |
3464 | bcmgenet_fini_dma(priv); |
3465 | } |
3466 | |
3467 | static int bcmgenet_close(struct net_device *dev) |
3468 | { |
3469 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3470 | int ret = 0; |
3471 | |
	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev, false);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(dev->phydev);
3478 | |
3479 | free_irq(priv->irq0, priv); |
3480 | free_irq(priv->irq1, priv); |
3481 | |
3482 | if (priv->internal_phy) |
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);
3486 | |
3487 | return ret; |
3488 | } |
3489 | |
3490 | static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) |
3491 | { |
3492 | struct bcmgenet_priv *priv = ring->priv; |
3493 | u32 p_index, c_index, intsts, intmsk; |
3494 | struct netdev_queue *txq; |
3495 | unsigned int free_bds; |
3496 | bool txq_stopped; |
3497 | |
3498 | if (!netif_msg_tx_err(priv)) |
3499 | return; |
3500 | |
	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock(&ring->lock);
3504 | if (ring->index == DESC_INDEX) { |
3505 | intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); |
3506 | intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; |
3507 | } else { |
3508 | intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
3509 | intmsk = 1 << ring->index; |
3510 | } |
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock(&ring->lock);
3516 | |
3517 | netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" |
3518 | "TX queue status: %s, interrupts: %s\n" |
3519 | "(sw)free_bds: %d (sw)size: %d\n" |
3520 | "(sw)p_index: %d (hw)p_index: %d\n" |
3521 | "(sw)c_index: %d (hw)c_index: %d\n" |
3522 | "(sw)clean_p: %d (sw)write_p: %d\n" |
3523 | "(sw)cb_ptr: %d (sw)end_ptr: %d\n" , |
3524 | ring->index, ring->queue, |
3525 | txq_stopped ? "stopped" : "active" , |
3526 | intsts & intmsk ? "enabled" : "disabled" , |
3527 | free_bds, ring->size, |
3528 | ring->prod_index, p_index & DMA_P_INDEX_MASK, |
3529 | ring->c_index, c_index & DMA_C_INDEX_MASK, |
3530 | ring->clean_ptr, ring->write_ptr, |
3531 | ring->cb_ptr, ring->end_ptr); |
3532 | } |
3533 | |
3534 | static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) |
3535 | { |
3536 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3537 | u32 int0_enable = 0; |
3538 | u32 int1_enable = 0; |
3539 | unsigned int q; |
3540 | |
	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3546 | |
3547 | bcmgenet_tx_reclaim_all(dev); |
3548 | |
3549 | for (q = 0; q < priv->hw_params->tx_queues; q++) |
3550 | int1_enable |= (1 << q); |
3551 | |
3552 | int0_enable = UMAC_IRQ_TXDMA_DONE; |
3553 | |
3554 | /* Re-enable TX interrupts if disabled */ |
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3557 | |
3558 | netif_trans_update(dev); |
3559 | |
3560 | dev->stats.tx_errors++; |
3561 | |
3562 | netif_tx_wake_all_queues(dev); |
3563 | } |
3564 | |
3565 | #define MAX_MDF_FILTER 17 |
3566 | |
3567 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, |
3568 | const unsigned char *addr, |
3569 | int *i) |
3570 | { |
	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
3576 | *i += 2; |
3577 | } |
3578 | |
3579 | static void bcmgenet_set_rx_mode(struct net_device *dev) |
3580 | { |
3581 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3582 | struct netdev_hw_addr *ha; |
3583 | int i, nfilter; |
3584 | u32 reg; |
3585 | |
	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3587 | |
3588 | /* Number of filters needed */ |
3589 | nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; |
3590 | |
3591 | /* |
3592 | * Turn on promicuous mode for three scenarios |
3593 | * 1. IFF_PROMISC flag is set |
3594 | * 2. IFF_ALLMULTI flag is set |
3595 | * 3. The number of filters needed exceeds the number filters |
3596 | * supported by the hardware. |
3597 | */ |
3598 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
3599 | if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || |
3600 | (nfilter > MAX_MDF_FILTER)) { |
3601 | reg |= CMD_PROMISC; |
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3608 | } |
3609 | |
3610 | /* update MDF filter */ |
3611 | i = 0; |
3612 | /* Broadcast */ |
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

	/* Unicast */
	netdev_for_each_uc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Multicast */
	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3624 | |
3625 | /* Enable filters */ |
3626 | reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); |
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3628 | } |
3629 | |
3630 | /* Set the hardware MAC address. */ |
3631 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) |
3632 | { |
3633 | struct sockaddr *addr = p; |
3634 | |
3635 | /* Setting the MAC address at the hardware level is not possible |
3636 | * without disabling the UniMAC RX/TX enable bits. |
3637 | */ |
3638 | if (netif_running(dev)) |
3639 | return -EBUSY; |
3640 | |
	eth_hw_addr_set(dev, addr->sa_data);
3642 | |
3643 | return 0; |
3644 | } |
3645 | |
3646 | static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) |
3647 | { |
3648 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3649 | unsigned long tx_bytes = 0, tx_packets = 0; |
3650 | unsigned long rx_bytes = 0, rx_packets = 0; |
3651 | unsigned long rx_errors = 0, rx_dropped = 0; |
3652 | struct bcmgenet_tx_ring *tx_ring; |
3653 | struct bcmgenet_rx_ring *rx_ring; |
3654 | unsigned int q; |
3655 | |
3656 | for (q = 0; q < priv->hw_params->tx_queues; q++) { |
3657 | tx_ring = &priv->tx_rings[q]; |
3658 | tx_bytes += tx_ring->bytes; |
3659 | tx_packets += tx_ring->packets; |
3660 | } |
3661 | tx_ring = &priv->tx_rings[DESC_INDEX]; |
3662 | tx_bytes += tx_ring->bytes; |
3663 | tx_packets += tx_ring->packets; |
3664 | |
3665 | for (q = 0; q < priv->hw_params->rx_queues; q++) { |
3666 | rx_ring = &priv->rx_rings[q]; |
3667 | |
3668 | rx_bytes += rx_ring->bytes; |
3669 | rx_packets += rx_ring->packets; |
3670 | rx_errors += rx_ring->errors; |
3671 | rx_dropped += rx_ring->dropped; |
3672 | } |
3673 | rx_ring = &priv->rx_rings[DESC_INDEX]; |
3674 | rx_bytes += rx_ring->bytes; |
3675 | rx_packets += rx_ring->packets; |
3676 | rx_errors += rx_ring->errors; |
3677 | rx_dropped += rx_ring->dropped; |
3678 | |
3679 | dev->stats.tx_bytes = tx_bytes; |
3680 | dev->stats.tx_packets = tx_packets; |
3681 | dev->stats.rx_bytes = rx_bytes; |
3682 | dev->stats.rx_packets = rx_packets; |
3683 | dev->stats.rx_errors = rx_errors; |
3684 | dev->stats.rx_missed_errors = rx_errors; |
3685 | dev->stats.rx_dropped = rx_dropped; |
3686 | return &dev->stats; |
3687 | } |
3688 | |
3689 | static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) |
3690 | { |
3691 | struct bcmgenet_priv *priv = netdev_priv(dev); |
3692 | |
	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
3694 | priv->phy_interface != PHY_INTERFACE_MODE_MOCA) |
3695 | return -EOPNOTSUPP; |
3696 | |
3697 | if (new_carrier) |
3698 | netif_carrier_on(dev); |
3699 | else |
3700 | netif_carrier_off(dev); |
3701 | |
3702 | return 0; |
3703 | } |
3704 | |
3705 | static const struct net_device_ops bcmgenet_netdev_ops = { |
3706 | .ndo_open = bcmgenet_open, |
3707 | .ndo_stop = bcmgenet_close, |
3708 | .ndo_start_xmit = bcmgenet_xmit, |
3709 | .ndo_tx_timeout = bcmgenet_timeout, |
3710 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, |
3711 | .ndo_set_mac_address = bcmgenet_set_mac_addr, |
3712 | .ndo_eth_ioctl = phy_do_ioctl_running, |
3713 | .ndo_set_features = bcmgenet_set_features, |
3714 | .ndo_get_stats = bcmgenet_get_stats, |
3715 | .ndo_change_carrier = bcmgenet_change_carrier, |
3716 | }; |
3717 | |
3718 | /* Array of GENET hardware parameters/characteristics */ |
3719 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { |
3720 | [GENET_V1] = { |
3721 | .tx_queues = 0, |
3722 | .tx_bds_per_q = 0, |
3723 | .rx_queues = 0, |
3724 | .rx_bds_per_q = 0, |
3725 | .bp_in_en_shift = 16, |
3726 | .bp_in_mask = 0xffff, |
3727 | .hfb_filter_cnt = 16, |
3728 | .qtag_mask = 0x1F, |
3729 | .hfb_offset = 0x1000, |
3730 | .rdma_offset = 0x2000, |
3731 | .tdma_offset = 0x3000, |
3732 | .words_per_bd = 2, |
3733 | }, |
3734 | [GENET_V2] = { |
3735 | .tx_queues = 4, |
3736 | .tx_bds_per_q = 32, |
3737 | .rx_queues = 0, |
3738 | .rx_bds_per_q = 0, |
3739 | .bp_in_en_shift = 16, |
3740 | .bp_in_mask = 0xffff, |
3741 | .hfb_filter_cnt = 16, |
3742 | .qtag_mask = 0x1F, |
3743 | .tbuf_offset = 0x0600, |
3744 | .hfb_offset = 0x1000, |
3745 | .hfb_reg_offset = 0x2000, |
3746 | .rdma_offset = 0x3000, |
3747 | .tdma_offset = 0x4000, |
3748 | .words_per_bd = 2, |
3749 | .flags = GENET_HAS_EXT, |
3750 | }, |
3751 | [GENET_V3] = { |
3752 | .tx_queues = 4, |
3753 | .tx_bds_per_q = 32, |
3754 | .rx_queues = 0, |
3755 | .rx_bds_per_q = 0, |
3756 | .bp_in_en_shift = 17, |
3757 | .bp_in_mask = 0x1ffff, |
3758 | .hfb_filter_cnt = 48, |
3759 | .hfb_filter_size = 128, |
3760 | .qtag_mask = 0x3F, |
3761 | .tbuf_offset = 0x0600, |
3762 | .hfb_offset = 0x8000, |
3763 | .hfb_reg_offset = 0xfc00, |
3764 | .rdma_offset = 0x10000, |
3765 | .tdma_offset = 0x11000, |
3766 | .words_per_bd = 2, |
3767 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | |
3768 | GENET_HAS_MOCA_LINK_DET, |
3769 | }, |
3770 | [GENET_V4] = { |
3771 | .tx_queues = 4, |
3772 | .tx_bds_per_q = 32, |
3773 | .rx_queues = 0, |
3774 | .rx_bds_per_q = 0, |
3775 | .bp_in_en_shift = 17, |
3776 | .bp_in_mask = 0x1ffff, |
3777 | .hfb_filter_cnt = 48, |
3778 | .hfb_filter_size = 128, |
3779 | .qtag_mask = 0x3F, |
3780 | .tbuf_offset = 0x0600, |
3781 | .hfb_offset = 0x8000, |
3782 | .hfb_reg_offset = 0xfc00, |
3783 | .rdma_offset = 0x2000, |
3784 | .tdma_offset = 0x4000, |
3785 | .words_per_bd = 3, |
3786 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | |
3787 | GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, |
3788 | }, |
3789 | [GENET_V5] = { |
3790 | .tx_queues = 4, |
3791 | .tx_bds_per_q = 32, |
3792 | .rx_queues = 0, |
3793 | .rx_bds_per_q = 0, |
3794 | .bp_in_en_shift = 17, |
3795 | .bp_in_mask = 0x1ffff, |
3796 | .hfb_filter_cnt = 48, |
3797 | .hfb_filter_size = 128, |
3798 | .qtag_mask = 0x3F, |
3799 | .tbuf_offset = 0x0600, |
3800 | .hfb_offset = 0x8000, |
3801 | .hfb_reg_offset = 0xfc00, |
3802 | .rdma_offset = 0x2000, |
3803 | .tdma_offset = 0x4000, |
3804 | .words_per_bd = 3, |
3805 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | |
3806 | GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, |
3807 | }, |
3808 | }; |
3809 | |
3810 | /* Infer hardware parameters from the detected GENET version */ |
3811 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) |
3812 | { |
3813 | struct bcmgenet_hw_params *params; |
3814 | u32 reg; |
3815 | u8 major; |
3816 | u16 gphy_rev; |
3817 | |
3818 | if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { |
3819 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; |
3820 | genet_dma_ring_regs = genet_dma_ring_regs_v4; |
3821 | } else if (GENET_IS_V3(priv)) { |
3822 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; |
3823 | genet_dma_ring_regs = genet_dma_ring_regs_v123; |
3824 | } else if (GENET_IS_V2(priv)) { |
3825 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; |
3826 | genet_dma_ring_regs = genet_dma_ring_regs_v123; |
3827 | } else if (GENET_IS_V1(priv)) { |
3828 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; |
3829 | genet_dma_ring_regs = genet_dma_ring_regs_v123; |
3830 | } |
3831 | |
3832 | /* enum genet_version starts at 1 */ |
3833 | priv->hw_params = &bcmgenet_hw_params[priv->version]; |
3834 | params = priv->hw_params; |
3835 | |
3836 | /* Read GENET HW version */ |
3837 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); |
3838 | major = (reg >> 24 & 0x0f); |
3839 | if (major == 6) |
3840 | major = 5; |
3841 | else if (major == 5) |
3842 | major = 4; |
3843 | else if (major == 0) |
3844 | major = 1; |
3845 | if (major != priv->version) { |
3846 | dev_err(&priv->pdev->dev, |
3847 | "GENET version mismatch, got: %d, configured for: %d\n" , |
3848 | major, priv->version); |
3849 | } |
3850 | |
3851 | /* Print the GENET core version */ |
3852 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, |
3853 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
3854 | |
3855 | /* Store the integrated PHY revision for the MDIO probing function |
3856 | * to pass this information to the PHY driver. The PHY driver expects |
3857 | * to find the PHY major revision in bits 15:8 while the GENET register |
3858 | * stores that information in bits 7:0, account for that. |
3859 | * |
3860 | * On newer chips, starting with PHY revision G0, a new scheme is |
3861 | * deployed similar to the Starfighter 2 switch with GPHY major |
3862 | * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 |
	 * is reserved, as is the special value 0x01ff; we use a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
3866 | */ |
3867 | gphy_rev = reg & 0xffff; |
3868 | |
3869 | if (GENET_IS_V5(priv)) { |
3870 | /* The EPHY revision should come from the MDIO registers of |
3871 | * the PHY not from GENET. |
3872 | */ |
3873 | if (gphy_rev != 0) { |
3874 | pr_warn("GENET is reporting EPHY revision: 0x%04x\n" , |
3875 | gphy_rev); |
3876 | } |
	/* These values are reserved and require special treatment */
3878 | } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { |
3879 | pr_warn("Invalid GPHY revision detected: 0x%04x\n" , gphy_rev); |
3880 | return; |
3881 | /* This is the good old scheme, just GPHY major, no minor nor patch */ |
3882 | } else if ((gphy_rev & 0xf0) != 0) { |
3883 | priv->gphy_rev = gphy_rev << 8; |
3884 | /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ |
3885 | } else if ((gphy_rev & 0xff00) != 0) { |
3886 | priv->gphy_rev = gphy_rev; |
3887 | } |
3888 | |
3889 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
3890 | if (!(params->flags & GENET_HAS_40BITS)) |
3891 | pr_warn("GENET does not support 40-bits PA\n" ); |
3892 | #endif |
3893 | |
3894 | pr_debug("Configuration for version: %d\n" |
3895 | "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" |
3896 | "BP << en: %2d, BP msk: 0x%05x\n" |
3897 | "HFB count: %2d, QTAQ msk: 0x%05x\n" |
3898 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" |
3899 | "RDMA: 0x%05x, TDMA: 0x%05x\n" |
3900 | "Words/BD: %d\n" , |
3901 | priv->version, |
3902 | params->tx_queues, params->tx_bds_per_q, |
3903 | params->rx_queues, params->rx_bds_per_q, |
3904 | params->bp_in_en_shift, params->bp_in_mask, |
3905 | params->hfb_filter_cnt, params->qtag_mask, |
3906 | params->tbuf_offset, params->hfb_offset, |
3907 | params->hfb_reg_offset, |
3908 | params->rdma_offset, params->tdma_offset, |
3909 | params->words_per_bd); |
3910 | } |
3911 | |
3912 | struct bcmgenet_plat_data { |
3913 | enum bcmgenet_version version; |
3914 | u32 dma_max_burst_length; |
3915 | bool ephy_16nm; |
3916 | }; |
3917 | |
3918 | static const struct bcmgenet_plat_data v1_plat_data = { |
3919 | .version = GENET_V1, |
3920 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3921 | }; |
3922 | |
3923 | static const struct bcmgenet_plat_data v2_plat_data = { |
3924 | .version = GENET_V2, |
3925 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3926 | }; |
3927 | |
3928 | static const struct bcmgenet_plat_data v3_plat_data = { |
3929 | .version = GENET_V3, |
3930 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3931 | }; |
3932 | |
3933 | static const struct bcmgenet_plat_data v4_plat_data = { |
3934 | .version = GENET_V4, |
3935 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3936 | }; |
3937 | |
3938 | static const struct bcmgenet_plat_data v5_plat_data = { |
3939 | .version = GENET_V5, |
3940 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3941 | }; |
3942 | |
3943 | static const struct bcmgenet_plat_data bcm2711_plat_data = { |
3944 | .version = GENET_V5, |
3945 | .dma_max_burst_length = 0x08, |
3946 | }; |
3947 | |
3948 | static const struct bcmgenet_plat_data bcm7712_plat_data = { |
3949 | .version = GENET_V5, |
3950 | .dma_max_burst_length = DMA_MAX_BURST_LENGTH, |
3951 | .ephy_16nm = true, |
3952 | }; |
3953 | |
3954 | static const struct of_device_id bcmgenet_match[] = { |
3955 | { .compatible = "brcm,genet-v1" , .data = &v1_plat_data }, |
3956 | { .compatible = "brcm,genet-v2" , .data = &v2_plat_data }, |
3957 | { .compatible = "brcm,genet-v3" , .data = &v3_plat_data }, |
3958 | { .compatible = "brcm,genet-v4" , .data = &v4_plat_data }, |
3959 | { .compatible = "brcm,genet-v5" , .data = &v5_plat_data }, |
3960 | { .compatible = "brcm,bcm2711-genet-v5" , .data = &bcm2711_plat_data }, |
3961 | { .compatible = "brcm,bcm7712-genet-v5" , .data = &bcm7712_plat_data }, |
3962 | { }, |
3963 | }; |
3964 | MODULE_DEVICE_TABLE(of, bcmgenet_match); |
3965 | |
3966 | static int bcmgenet_probe(struct platform_device *pdev) |
3967 | { |
3968 | struct bcmgenet_platform_data *pd = pdev->dev.platform_data; |
3969 | const struct bcmgenet_plat_data *pdata; |
3970 | struct bcmgenet_priv *priv; |
3971 | struct net_device *dev; |
3972 | unsigned int i; |
3973 | int err = -EIO; |
3974 | |
3975 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ |
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
3980 | return -ENOMEM; |
3981 | } |
3982 | |
3983 | priv = netdev_priv(dev); |
3984 | priv->irq0 = platform_get_irq(pdev, 0); |
3985 | if (priv->irq0 < 0) { |
3986 | err = priv->irq0; |
3987 | goto err; |
3988 | } |
3989 | priv->irq1 = platform_get_irq(pdev, 1); |
3990 | if (priv->irq1 < 0) { |
3991 | err = priv->irq1; |
3992 | goto err; |
3993 | } |
3994 | priv->wol_irq = platform_get_irq_optional(pdev, 2); |
3995 | if (priv->wol_irq == -EPROBE_DEFER) { |
3996 | err = priv->wol_irq; |
3997 | goto err; |
3998 | } |
3999 | |
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
4003 | goto err; |
4004 | } |
4005 | |
4006 | spin_lock_init(&priv->lock); |
4007 | |
4008 | /* Set default pause parameters */ |
4009 | priv->autoneg_pause = 1; |
4010 | priv->tx_pause = 1; |
4011 | priv->rx_pause = 1; |
4012 | |
4013 | SET_NETDEV_DEV(dev, &pdev->dev); |
	dev_set_drvdata(&pdev->dev, dev);
4015 | dev->watchdog_timeo = 2 * HZ; |
4016 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
4017 | dev->netdev_ops = &bcmgenet_netdev_ops; |
4018 | |
	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
4020 | |
4021 | /* Set default features */ |
4022 | dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | |
4023 | NETIF_F_RXCSUM; |
4024 | dev->hw_features |= dev->features; |
4025 | dev->vlan_features |= dev->features; |
4026 | |
4027 | /* Request the WOL interrupt and advertise suspend if available */ |
4028 | priv->wol_irq_disabled = true; |
4029 | if (priv->wol_irq > 0) { |
		err = devm_request_irq(&pdev->dev, priv->wol_irq,
				       bcmgenet_wol_isr, 0, dev->name, priv);
		if (!err)
			device_set_wakeup_capable(&pdev->dev, 1);
4034 | } |
4035 | |
4036 | /* Set the needed headroom to account for any possible |
4037 | * features enabling/disabling at runtime |
4038 | */ |
4039 | dev->needed_headroom += 64; |
4040 | |
4041 | priv->dev = dev; |
4042 | priv->pdev = pdev; |
4043 | |
	pdata = device_get_match_data(&pdev->dev);
4045 | if (pdata) { |
4046 | priv->version = pdata->version; |
4047 | priv->dma_max_burst_length = pdata->dma_max_burst_length; |
4048 | priv->ephy_16nm = pdata->ephy_16nm; |
4049 | } else { |
4050 | priv->version = pd->genet_version; |
4051 | priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; |
4052 | } |
4053 | |
	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
		err = PTR_ERR(priv->clk);
4058 | goto err; |
4059 | } |
4060 | |
	err = clk_prepare_enable(priv->clk);
4062 | if (err) |
4063 | goto err; |
4064 | |
4065 | bcmgenet_set_hw_params(priv); |
4066 | |
	/* Prefer a 40-bit DMA mask when the hardware supports it, falling
	 * back to a 32-bit mask otherwise; err is primed to -EIO so the
	 * fallback also runs when 40-bit support is absent.
	 */
	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_clk_disable;
4074 | |
4075 | /* Mii wait queue */ |
4076 | init_waitqueue_head(&priv->wq); |
4077 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ |
4078 | priv->rx_buf_len = RX_BUF_LENGTH; |
4079 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); |
4080 | |
	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
		err = PTR_ERR(priv->clk_wol);
4085 | goto err_clk_disable; |
4086 | } |
4087 | |
	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
		err = PTR_ERR(priv->clk_eee);
4092 | goto err_clk_disable; |
4093 | } |
4094 | |
4095 | /* If this is an internal GPHY, power it on now, before UniMAC is |
4096 | * brought out of reset as absolutely no UniMAC activity is allowed |
4097 | */ |
	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4100 | |
	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
		eth_hw_addr_set(dev, pd->mac_address);
	else
		if (device_get_ethdev_address(&pdev->dev, dev))
			if (has_acpi_companion(&pdev->dev)) {
				u8 addr[ETH_ALEN];

				bcmgenet_get_hw_addr(priv, addr);
				eth_hw_addr_set(dev, addr);
4110 | } |
4111 | |
	if (!is_valid_ether_addr(dev->dev_addr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
4114 | eth_hw_addr_random(dev); |
4115 | } |
4116 | |
4117 | reset_umac(priv); |
4118 | |
4119 | err = bcmgenet_mii_init(dev); |
4120 | if (err) |
4121 | goto err_clk_disable; |
4122 | |
	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues and uses only the ring 16 descriptor-based TX queue)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
4128 | |
4129 | /* Set default coalescing parameters */ |
4130 | for (i = 0; i < priv->hw_params->rx_queues; i++) |
4131 | priv->rx_rings[i].rx_max_coalesced_frames = 1; |
4132 | priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; |
4133 | |
4134 | /* libphy will determine the link state */ |
4135 | netif_carrier_off(dev); |
4136 | |
4137 | /* Turn off the main clock, WOL clock is handled separately */ |
	clk_disable_unprepare(priv->clk);
4139 | |
4140 | err = register_netdev(dev); |
4141 | if (err) { |
4142 | bcmgenet_mii_exit(dev); |
4143 | goto err; |
4144 | } |
4145 | |
4146 | return err; |
4147 | |
4148 | err_clk_disable: |
	clk_disable_unprepare(priv->clk);
4150 | err: |
4151 | free_netdev(dev); |
4152 | return err; |
4153 | } |
4154 | |
4155 | static void bcmgenet_remove(struct platform_device *pdev) |
4156 | { |
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);
4163 | } |
4164 | |
4165 | static void bcmgenet_shutdown(struct platform_device *pdev) |
4166 | { |
4167 | bcmgenet_remove(pdev); |
4168 | } |
4169 | |
4170 | #ifdef CONFIG_PM_SLEEP |
4171 | static int bcmgenet_resume_noirq(struct device *d) |
4172 | { |
	struct net_device *dev = dev_get_drvdata(d);
4174 | struct bcmgenet_priv *priv = netdev_priv(dev); |
4175 | int ret; |
4176 | u32 reg; |
4177 | |
4178 | if (!netif_running(dev)) |
4179 | return 0; |
4180 | |
4181 | /* Turn on the clock */ |
	ret = clk_prepare_enable(priv->clk);
4183 | if (ret) |
4184 | return ret; |
4185 | |
	if (device_may_wakeup(d) && priv->wolopts) {
4187 | /* Account for Wake-on-LAN events and clear those events |
4188 | * (Some devices need more time between enabling the clocks |
4189 | * and the interrupt register reflecting the wake event so |
4190 | * read the register twice) |
4191 | */ |
4192 | reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); |
4193 | reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); |
4194 | if (reg & UMAC_IRQ_WAKE_EVENT) |
			pm_wakeup_event(&priv->pdev->dev, 0);
4196 | } |
4197 | |
4198 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); |
4199 | |
4200 | return 0; |
4201 | } |
4202 | |
4203 | static int bcmgenet_resume(struct device *d) |
4204 | { |
	struct net_device *dev = dev_get_drvdata(d);
4206 | struct bcmgenet_priv *priv = netdev_priv(dev); |
4207 | struct bcmgenet_rxnfc_rule *rule; |
4208 | unsigned long dma_ctrl; |
4209 | int ret; |
4210 | |
4211 | if (!netif_running(dev)) |
4212 | return 0; |
4213 | |
4214 | /* From WOL-enabled suspend, switch to regular clock */ |
	if (device_may_wakeup(d) && priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
4217 | |
4218 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
4219 | * brought out of reset as absolutely no UniMAC activity is allowed |
4220 | */ |
4221 | if (priv->internal_phy) |
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4223 | |
4224 | bcmgenet_umac_reset(priv); |
4225 | |
4226 | init_umac(priv); |
4227 | |
	phy_init_hw(dev->phydev);
4229 | |
4230 | /* Speed settings must be restored */ |
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);
4233 | |
4234 | /* Restore enabled features */ |
	bcmgenet_set_features(dev, dev->features);
4236 | |
	bcmgenet_set_hw_addr(priv, dev->dev_addr);
4238 | |
4239 | /* Restore hardware filters */ |
4240 | bcmgenet_hfb_clear(priv); |
4241 | list_for_each_entry(rule, &priv->rxnfc_list, list) |
4242 | if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) |
4243 | bcmgenet_hfb_create_rxnfc_filter(priv, rule); |
4244 | |
4245 | /* Disable RX/TX DMA and flush TX queues */ |
	dma_ctrl = bcmgenet_dma_disable(priv, false);
4247 | |
4248 | /* Reinitialize TDMA and RDMA and SW housekeeping */ |
4249 | ret = bcmgenet_init_dma(priv); |
4250 | if (ret) { |
		netdev_err(dev, "failed to initialize DMA\n");
4252 | goto out_clk_disable; |
4253 | } |
4254 | |
4255 | /* Always enable ring 16 - descriptor ring */ |
4256 | bcmgenet_enable_dma(priv, dma_ctrl); |
4257 | |
	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);
4260 | |
4261 | bcmgenet_netif_start(dev); |
4262 | |
4263 | netif_device_attach(dev); |
4264 | |
4265 | return 0; |
4266 | |
4267 | out_clk_disable: |
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
4271 | return ret; |
4272 | } |
4273 | |
4274 | static int bcmgenet_suspend(struct device *d) |
4275 | { |
	struct net_device *dev = dev_get_drvdata(d);
4277 | struct bcmgenet_priv *priv = netdev_priv(dev); |
4278 | |
4279 | if (!netif_running(dev)) |
4280 | return 0; |
4281 | |
4282 | netif_device_detach(dev); |
4283 | |
	bcmgenet_netif_stop(dev, true);
4285 | |
	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);
4288 | |
4289 | /* Disable filtering */ |
	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
4291 | |
4292 | return 0; |
4293 | } |
4294 | |
4295 | static int bcmgenet_suspend_noirq(struct device *d) |
4296 | { |
	struct net_device *dev = dev_get_drvdata(d);
4298 | struct bcmgenet_priv *priv = netdev_priv(dev); |
4299 | int ret = 0; |
4300 | |
4301 | if (!netif_running(dev)) |
4302 | return 0; |
4303 | |
4304 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
	else if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4309 | |
4310 | /* Let the framework handle resumption and leave the clocks on */ |
4311 | if (ret) |
4312 | return ret; |
4313 | |
4314 | /* Turn off the clocks */ |
	clk_disable_unprepare(priv->clk);
4316 | |
4317 | return 0; |
4318 | } |
4319 | #else |
4320 | #define bcmgenet_suspend NULL |
4321 | #define bcmgenet_suspend_noirq NULL |
4322 | #define bcmgenet_resume NULL |
4323 | #define bcmgenet_resume_noirq NULL |
4324 | #endif /* CONFIG_PM_SLEEP */ |
4325 | |
4326 | static const struct dev_pm_ops bcmgenet_pm_ops = { |
4327 | .suspend = bcmgenet_suspend, |
4328 | .suspend_noirq = bcmgenet_suspend_noirq, |
4329 | .resume = bcmgenet_resume, |
4330 | .resume_noirq = bcmgenet_resume_noirq, |
4331 | }; |
4332 | |
4333 | static const struct acpi_device_id genet_acpi_match[] = { |
4334 | { "BCM6E4E" , (kernel_ulong_t)&bcm2711_plat_data }, |
4335 | { }, |
4336 | }; |
4337 | MODULE_DEVICE_TABLE(acpi, genet_acpi_match); |
4338 | |
4339 | static struct platform_driver bcmgenet_driver = { |
4340 | .probe = bcmgenet_probe, |
4341 | .remove_new = bcmgenet_remove, |
4342 | .shutdown = bcmgenet_shutdown, |
4343 | .driver = { |
4344 | .name = "bcmgenet" , |
4345 | .of_match_table = bcmgenet_match, |
4346 | .pm = &bcmgenet_pm_ops, |
4347 | .acpi_match_table = genet_acpi_match, |
4348 | }, |
4349 | }; |
4350 | module_platform_driver(bcmgenet_driver); |
4351 | |
4352 | MODULE_AUTHOR("Broadcom Corporation" ); |
4353 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver" ); |
4354 | MODULE_ALIAS("platform:bcmgenet" ); |
4355 | MODULE_LICENSE("GPL" ); |
4356 | MODULE_SOFTDEP("pre: mdio-bcm-unimac" ); |
4357 | |