1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. |
4 | * DWC Ether MAC version 4.00 has been used for developing this code. |
5 | * |
6 | * This only implements the mac core functions for this chip. |
7 | * |
8 | * Copyright (C) 2015 STMicroelectronics Ltd |
9 | * |
10 | * Author: Alexandre Torgue <alexandre.torgue@st.com> |
11 | */ |
12 | |
13 | #include <linux/crc32.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/ethtool.h> |
16 | #include <linux/io.h> |
17 | #include "stmmac.h" |
18 | #include "stmmac_pcs.h" |
19 | #include "dwmac4.h" |
20 | #include "dwmac5.h" |
21 | |
22 | static void dwmac4_core_init(struct mac_device_info *hw, |
23 | struct net_device *dev) |
24 | { |
25 | struct stmmac_priv *priv = netdev_priv(dev); |
26 | void __iomem *ioaddr = hw->pcsr; |
27 | u32 value = readl(addr: ioaddr + GMAC_CONFIG); |
28 | u32 clk_rate; |
29 | |
30 | value |= GMAC_CORE_INIT; |
31 | |
32 | if (hw->ps) { |
33 | value |= GMAC_CONFIG_TE; |
34 | |
35 | value &= hw->link.speed_mask; |
36 | switch (hw->ps) { |
37 | case SPEED_1000: |
38 | value |= hw->link.speed1000; |
39 | break; |
40 | case SPEED_100: |
41 | value |= hw->link.speed100; |
42 | break; |
43 | case SPEED_10: |
44 | value |= hw->link.speed10; |
45 | break; |
46 | } |
47 | } |
48 | |
49 | writel(val: value, addr: ioaddr + GMAC_CONFIG); |
50 | |
51 | /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */ |
52 | clk_rate = clk_get_rate(clk: priv->plat->stmmac_clk); |
53 | writel(val: (clk_rate / 1000000) - 1, addr: ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER); |
54 | |
55 | /* Enable GMAC interrupts */ |
56 | value = GMAC_INT_DEFAULT_ENABLE; |
57 | |
58 | if (hw->pcs) |
59 | value |= GMAC_PCS_IRQ_DEFAULT; |
60 | |
61 | /* Enable FPE interrupt */ |
62 | if ((GMAC_HW_FEAT_FPESEL & readl(addr: ioaddr + GMAC_HW_FEATURE3)) >> 26) |
63 | value |= GMAC_INT_FPE_EN; |
64 | |
65 | writel(val: value, addr: ioaddr + GMAC_INT_EN); |
66 | |
67 | if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) |
68 | init_waitqueue_head(&priv->tstamp_busy_wait); |
69 | } |
70 | |
71 | static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) |
72 | { |
73 | if (priv->plat->tx_queues_to_use > 1) |
74 | priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); |
75 | else |
76 | priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD); |
77 | } |
78 | |
79 | static void dwmac4_rx_queue_enable(struct mac_device_info *hw, |
80 | u8 mode, u32 queue) |
81 | { |
82 | void __iomem *ioaddr = hw->pcsr; |
83 | u32 value = readl(addr: ioaddr + GMAC_RXQ_CTRL0); |
84 | |
85 | value &= GMAC_RX_QUEUE_CLEAR(queue); |
86 | if (mode == MTL_QUEUE_AVB) |
87 | value |= GMAC_RX_AV_QUEUE_ENABLE(queue); |
88 | else if (mode == MTL_QUEUE_DCB) |
89 | value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); |
90 | |
91 | writel(val: value, addr: ioaddr + GMAC_RXQ_CTRL0); |
92 | } |
93 | |
94 | static void dwmac4_rx_queue_priority(struct mac_device_info *hw, |
95 | u32 prio, u32 queue) |
96 | { |
97 | void __iomem *ioaddr = hw->pcsr; |
98 | u32 clear_mask = 0; |
99 | u32 ctrl2, ctrl3; |
100 | int i; |
101 | |
102 | ctrl2 = readl(addr: ioaddr + GMAC_RXQ_CTRL2); |
103 | ctrl3 = readl(addr: ioaddr + GMAC_RXQ_CTRL3); |
104 | |
105 | /* The software must ensure that the same priority |
106 | * is not mapped to multiple Rx queues |
107 | */ |
108 | for (i = 0; i < 4; i++) |
109 | clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) & |
110 | GMAC_RXQCTRL_PSRQX_MASK(i)); |
111 | |
112 | ctrl2 &= ~clear_mask; |
113 | ctrl3 &= ~clear_mask; |
114 | |
115 | /* First assign new priorities to a queue, then |
116 | * clear them from others queues |
117 | */ |
118 | if (queue < 4) { |
119 | ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & |
120 | GMAC_RXQCTRL_PSRQX_MASK(queue); |
121 | |
122 | writel(val: ctrl2, addr: ioaddr + GMAC_RXQ_CTRL2); |
123 | writel(val: ctrl3, addr: ioaddr + GMAC_RXQ_CTRL3); |
124 | } else { |
125 | queue -= 4; |
126 | |
127 | ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & |
128 | GMAC_RXQCTRL_PSRQX_MASK(queue); |
129 | |
130 | writel(val: ctrl3, addr: ioaddr + GMAC_RXQ_CTRL3); |
131 | writel(val: ctrl2, addr: ioaddr + GMAC_RXQ_CTRL2); |
132 | } |
133 | } |
134 | |
135 | static void dwmac4_tx_queue_priority(struct mac_device_info *hw, |
136 | u32 prio, u32 queue) |
137 | { |
138 | void __iomem *ioaddr = hw->pcsr; |
139 | u32 base_register; |
140 | u32 value; |
141 | |
142 | base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; |
143 | if (queue >= 4) |
144 | queue -= 4; |
145 | |
146 | value = readl(addr: ioaddr + base_register); |
147 | |
148 | value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); |
149 | value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & |
150 | GMAC_TXQCTRL_PSTQX_MASK(queue); |
151 | |
152 | writel(val: value, addr: ioaddr + base_register); |
153 | } |
154 | |
155 | static void dwmac4_rx_queue_routing(struct mac_device_info *hw, |
156 | u8 packet, u32 queue) |
157 | { |
158 | void __iomem *ioaddr = hw->pcsr; |
159 | u32 value; |
160 | |
161 | static const struct stmmac_rx_routing route_possibilities[] = { |
162 | { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, |
163 | { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, |
164 | { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, |
165 | { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, |
166 | { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, |
167 | }; |
168 | |
169 | value = readl(addr: ioaddr + GMAC_RXQ_CTRL1); |
170 | |
171 | /* routing configuration */ |
172 | value &= ~route_possibilities[packet - 1].reg_mask; |
173 | value |= (queue << route_possibilities[packet-1].reg_shift) & |
174 | route_possibilities[packet - 1].reg_mask; |
175 | |
176 | /* some packets require extra ops */ |
177 | if (packet == PACKET_AVCPQ) { |
178 | value &= ~GMAC_RXQCTRL_TACPQE; |
179 | value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; |
180 | } else if (packet == PACKET_MCBCQ) { |
181 | value &= ~GMAC_RXQCTRL_MCBCQEN; |
182 | value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; |
183 | } |
184 | |
185 | writel(val: value, addr: ioaddr + GMAC_RXQ_CTRL1); |
186 | } |
187 | |
188 | static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, |
189 | u32 rx_alg) |
190 | { |
191 | void __iomem *ioaddr = hw->pcsr; |
192 | u32 value = readl(addr: ioaddr + MTL_OPERATION_MODE); |
193 | |
194 | value &= ~MTL_OPERATION_RAA; |
195 | switch (rx_alg) { |
196 | case MTL_RX_ALGORITHM_SP: |
197 | value |= MTL_OPERATION_RAA_SP; |
198 | break; |
199 | case MTL_RX_ALGORITHM_WSP: |
200 | value |= MTL_OPERATION_RAA_WSP; |
201 | break; |
202 | default: |
203 | break; |
204 | } |
205 | |
206 | writel(val: value, addr: ioaddr + MTL_OPERATION_MODE); |
207 | } |
208 | |
209 | static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, |
210 | u32 tx_alg) |
211 | { |
212 | void __iomem *ioaddr = hw->pcsr; |
213 | u32 value = readl(addr: ioaddr + MTL_OPERATION_MODE); |
214 | |
215 | value &= ~MTL_OPERATION_SCHALG_MASK; |
216 | switch (tx_alg) { |
217 | case MTL_TX_ALGORITHM_WRR: |
218 | value |= MTL_OPERATION_SCHALG_WRR; |
219 | break; |
220 | case MTL_TX_ALGORITHM_WFQ: |
221 | value |= MTL_OPERATION_SCHALG_WFQ; |
222 | break; |
223 | case MTL_TX_ALGORITHM_DWRR: |
224 | value |= MTL_OPERATION_SCHALG_DWRR; |
225 | break; |
226 | case MTL_TX_ALGORITHM_SP: |
227 | value |= MTL_OPERATION_SCHALG_SP; |
228 | break; |
229 | default: |
230 | break; |
231 | } |
232 | |
233 | writel(val: value, addr: ioaddr + MTL_OPERATION_MODE); |
234 | } |
235 | |
236 | static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv, |
237 | struct mac_device_info *hw, |
238 | u32 weight, u32 queue) |
239 | { |
240 | const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; |
241 | void __iomem *ioaddr = hw->pcsr; |
242 | u32 value = readl(addr: ioaddr + mtl_txqx_weight_base_addr(addrs: dwmac4_addrs, |
243 | x: queue)); |
244 | |
245 | value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; |
246 | value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; |
247 | writel(val: value, addr: ioaddr + mtl_txqx_weight_base_addr(addrs: dwmac4_addrs, x: queue)); |
248 | } |
249 | |
250 | static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) |
251 | { |
252 | void __iomem *ioaddr = hw->pcsr; |
253 | u32 value; |
254 | |
255 | if (queue < 4) { |
256 | value = readl(addr: ioaddr + MTL_RXQ_DMA_MAP0); |
257 | value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); |
258 | value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); |
259 | writel(val: value, addr: ioaddr + MTL_RXQ_DMA_MAP0); |
260 | } else { |
261 | value = readl(addr: ioaddr + MTL_RXQ_DMA_MAP1); |
262 | value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); |
263 | value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); |
264 | writel(val: value, addr: ioaddr + MTL_RXQ_DMA_MAP1); |
265 | } |
266 | } |
267 | |
268 | static void dwmac4_config_cbs(struct stmmac_priv *priv, |
269 | struct mac_device_info *hw, |
270 | u32 send_slope, u32 idle_slope, |
271 | u32 high_credit, u32 low_credit, u32 queue) |
272 | { |
273 | const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; |
274 | void __iomem *ioaddr = hw->pcsr; |
275 | u32 value; |
276 | |
277 | pr_debug("Queue %d configured as AVB. Parameters:\n" , queue); |
278 | pr_debug("\tsend_slope: 0x%08x\n" , send_slope); |
279 | pr_debug("\tidle_slope: 0x%08x\n" , idle_slope); |
280 | pr_debug("\thigh_credit: 0x%08x\n" , high_credit); |
281 | pr_debug("\tlow_credit: 0x%08x\n" , low_credit); |
282 | |
283 | /* enable AV algorithm */ |
284 | value = readl(addr: ioaddr + mtl_etsx_ctrl_base_addr(addrs: dwmac4_addrs, x: queue)); |
285 | value |= MTL_ETS_CTRL_AVALG; |
286 | value |= MTL_ETS_CTRL_CC; |
287 | writel(val: value, addr: ioaddr + mtl_etsx_ctrl_base_addr(addrs: dwmac4_addrs, x: queue)); |
288 | |
289 | /* configure send slope */ |
290 | value = readl(addr: ioaddr + mtl_send_slp_credx_base_addr(addrs: dwmac4_addrs, |
291 | x: queue)); |
292 | value &= ~MTL_SEND_SLP_CRED_SSC_MASK; |
293 | value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; |
294 | writel(val: value, addr: ioaddr + mtl_send_slp_credx_base_addr(addrs: dwmac4_addrs, |
295 | x: queue)); |
296 | |
297 | /* configure idle slope (same register as tx weight) */ |
298 | dwmac4_set_mtl_tx_queue_weight(priv, hw, weight: idle_slope, queue); |
299 | |
300 | /* configure high credit */ |
301 | value = readl(addr: ioaddr + mtl_high_credx_base_addr(addrs: dwmac4_addrs, x: queue)); |
302 | value &= ~MTL_HIGH_CRED_HC_MASK; |
303 | value |= high_credit & MTL_HIGH_CRED_HC_MASK; |
304 | writel(val: value, addr: ioaddr + mtl_high_credx_base_addr(addrs: dwmac4_addrs, x: queue)); |
305 | |
306 | /* configure high credit */ |
307 | value = readl(addr: ioaddr + mtl_low_credx_base_addr(addrs: dwmac4_addrs, x: queue)); |
308 | value &= ~MTL_HIGH_CRED_LC_MASK; |
309 | value |= low_credit & MTL_HIGH_CRED_LC_MASK; |
310 | writel(val: value, addr: ioaddr + mtl_low_credx_base_addr(addrs: dwmac4_addrs, x: queue)); |
311 | } |
312 | |
313 | static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) |
314 | { |
315 | void __iomem *ioaddr = hw->pcsr; |
316 | int i; |
317 | |
318 | for (i = 0; i < GMAC_REG_NUM; i++) |
319 | reg_space[i] = readl(addr: ioaddr + i * 4); |
320 | } |
321 | |
322 | static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) |
323 | { |
324 | void __iomem *ioaddr = hw->pcsr; |
325 | u32 value = readl(addr: ioaddr + GMAC_CONFIG); |
326 | |
327 | if (hw->rx_csum) |
328 | value |= GMAC_CONFIG_IPC; |
329 | else |
330 | value &= ~GMAC_CONFIG_IPC; |
331 | |
332 | writel(val: value, addr: ioaddr + GMAC_CONFIG); |
333 | |
334 | value = readl(addr: ioaddr + GMAC_CONFIG); |
335 | |
336 | return !!(value & GMAC_CONFIG_IPC); |
337 | } |
338 | |
339 | static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) |
340 | { |
341 | void __iomem *ioaddr = hw->pcsr; |
342 | unsigned int pmt = 0; |
343 | u32 config; |
344 | |
345 | if (mode & WAKE_MAGIC) { |
346 | pr_debug("GMAC: WOL Magic frame\n" ); |
347 | pmt |= power_down | magic_pkt_en; |
348 | } |
349 | if (mode & WAKE_UCAST) { |
350 | pr_debug("GMAC: WOL on global unicast\n" ); |
351 | pmt |= power_down | global_unicast | wake_up_frame_en; |
352 | } |
353 | |
354 | if (pmt) { |
355 | /* The receiver must be enabled for WOL before powering down */ |
356 | config = readl(addr: ioaddr + GMAC_CONFIG); |
357 | config |= GMAC_CONFIG_RE; |
358 | writel(val: config, addr: ioaddr + GMAC_CONFIG); |
359 | } |
360 | writel(val: pmt, addr: ioaddr + GMAC_PMT); |
361 | } |
362 | |
/* dwmac4_set_umac_addr - write MAC address @addr into perfect-filter
 * slot @reg_n (helper splits it across the HIGH/LOW register pair).
 */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
371 | |
/* dwmac4_get_umac_addr - read the MAC address stored in perfect-filter
 * slot @reg_n into @addr.
 */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
380 | |
381 | static void dwmac4_set_eee_mode(struct mac_device_info *hw, |
382 | bool en_tx_lpi_clockgating) |
383 | { |
384 | void __iomem *ioaddr = hw->pcsr; |
385 | u32 value; |
386 | |
387 | /* Enable the link status receive on RGMII, SGMII ore SMII |
388 | * receive path and instruct the transmit to enter in LPI |
389 | * state. |
390 | */ |
391 | value = readl(addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
392 | value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; |
393 | |
394 | if (en_tx_lpi_clockgating) |
395 | value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; |
396 | |
397 | writel(val: value, addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
398 | } |
399 | |
400 | static void dwmac4_reset_eee_mode(struct mac_device_info *hw) |
401 | { |
402 | void __iomem *ioaddr = hw->pcsr; |
403 | u32 value; |
404 | |
405 | value = readl(addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
406 | value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); |
407 | writel(val: value, addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
408 | } |
409 | |
410 | static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) |
411 | { |
412 | void __iomem *ioaddr = hw->pcsr; |
413 | u32 value; |
414 | |
415 | value = readl(addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
416 | |
417 | if (link) |
418 | value |= GMAC4_LPI_CTRL_STATUS_PLS; |
419 | else |
420 | value &= ~GMAC4_LPI_CTRL_STATUS_PLS; |
421 | |
422 | writel(val: value, addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
423 | } |
424 | |
425 | static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et) |
426 | { |
427 | void __iomem *ioaddr = hw->pcsr; |
428 | int value = et & STMMAC_ET_MAX; |
429 | int regval; |
430 | |
431 | /* Program LPI entry timer value into register */ |
432 | writel(val: value, addr: ioaddr + GMAC4_LPI_ENTRY_TIMER); |
433 | |
434 | /* Enable/disable LPI entry timer */ |
435 | regval = readl(addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
436 | regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; |
437 | |
438 | if (et) |
439 | regval |= GMAC4_LPI_CTRL_STATUS_LPIATE; |
440 | else |
441 | regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE; |
442 | |
443 | writel(val: regval, addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
444 | } |
445 | |
446 | static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) |
447 | { |
448 | void __iomem *ioaddr = hw->pcsr; |
449 | int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); |
450 | |
451 | /* Program the timers in the LPI timer control register: |
452 | * LS: minimum time (ms) for which the link |
453 | * status from PHY should be ok before transmitting |
454 | * the LPI pattern. |
455 | * TW: minimum time (us) for which the core waits |
456 | * after it has stopped transmitting the LPI pattern. |
457 | */ |
458 | writel(val: value, addr: ioaddr + GMAC4_LPI_TIMER_CTRL); |
459 | } |
460 | |
461 | static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid) |
462 | { |
463 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; |
464 | u32 val; |
465 | |
466 | val = readl(addr: ioaddr + GMAC_VLAN_TAG); |
467 | val &= ~GMAC_VLAN_TAG_VID; |
468 | val |= GMAC_VLAN_TAG_ETV | vid; |
469 | |
470 | writel(val, addr: ioaddr + GMAC_VLAN_TAG); |
471 | } |
472 | |
473 | static int dwmac4_write_vlan_filter(struct net_device *dev, |
474 | struct mac_device_info *hw, |
475 | u8 index, u32 data) |
476 | { |
477 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; |
478 | int i, timeout = 10; |
479 | u32 val; |
480 | |
481 | if (index >= hw->num_vlan) |
482 | return -EINVAL; |
483 | |
484 | writel(val: data, addr: ioaddr + GMAC_VLAN_TAG_DATA); |
485 | |
486 | val = readl(addr: ioaddr + GMAC_VLAN_TAG); |
487 | val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK | |
488 | GMAC_VLAN_TAG_CTRL_CT | |
489 | GMAC_VLAN_TAG_CTRL_OB); |
490 | val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB; |
491 | |
492 | writel(val, addr: ioaddr + GMAC_VLAN_TAG); |
493 | |
494 | for (i = 0; i < timeout; i++) { |
495 | val = readl(addr: ioaddr + GMAC_VLAN_TAG); |
496 | if (!(val & GMAC_VLAN_TAG_CTRL_OB)) |
497 | return 0; |
498 | udelay(1); |
499 | } |
500 | |
501 | netdev_err(dev, format: "Timeout accessing MAC_VLAN_Tag_Filter\n" ); |
502 | |
503 | return -EBUSY; |
504 | } |
505 | |
506 | static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, |
507 | struct mac_device_info *hw, |
508 | __be16 proto, u16 vid) |
509 | { |
510 | int index = -1; |
511 | u32 val = 0; |
512 | int i, ret; |
513 | |
514 | if (vid > 4095) |
515 | return -EINVAL; |
516 | |
517 | /* Single Rx VLAN Filter */ |
518 | if (hw->num_vlan == 1) { |
519 | /* For single VLAN filter, VID 0 means VLAN promiscuous */ |
520 | if (vid == 0) { |
521 | netdev_warn(dev, format: "Adding VLAN ID 0 is not supported\n" ); |
522 | return -EPERM; |
523 | } |
524 | |
525 | if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) { |
526 | netdev_err(dev, format: "Only single VLAN ID supported\n" ); |
527 | return -EPERM; |
528 | } |
529 | |
530 | hw->vlan_filter[0] = vid; |
531 | dwmac4_write_single_vlan(dev, vid); |
532 | |
533 | return 0; |
534 | } |
535 | |
536 | /* Extended Rx VLAN Filter Enable */ |
537 | val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid; |
538 | |
539 | for (i = 0; i < hw->num_vlan; i++) { |
540 | if (hw->vlan_filter[i] == val) |
541 | return 0; |
542 | else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN)) |
543 | index = i; |
544 | } |
545 | |
546 | if (index == -1) { |
547 | netdev_err(dev, format: "MAC_VLAN_Tag_Filter full (size: %0u)\n" , |
548 | hw->num_vlan); |
549 | return -EPERM; |
550 | } |
551 | |
552 | ret = dwmac4_write_vlan_filter(dev, hw, index, data: val); |
553 | |
554 | if (!ret) |
555 | hw->vlan_filter[index] = val; |
556 | |
557 | return ret; |
558 | } |
559 | |
560 | static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, |
561 | struct mac_device_info *hw, |
562 | __be16 proto, u16 vid) |
563 | { |
564 | int i, ret = 0; |
565 | |
566 | /* Single Rx VLAN Filter */ |
567 | if (hw->num_vlan == 1) { |
568 | if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { |
569 | hw->vlan_filter[0] = 0; |
570 | dwmac4_write_single_vlan(dev, vid: 0); |
571 | } |
572 | return 0; |
573 | } |
574 | |
575 | /* Extended Rx VLAN Filter Enable */ |
576 | for (i = 0; i < hw->num_vlan; i++) { |
577 | if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) { |
578 | ret = dwmac4_write_vlan_filter(dev, hw, index: i, data: 0); |
579 | |
580 | if (!ret) |
581 | hw->vlan_filter[i] = 0; |
582 | else |
583 | return ret; |
584 | } |
585 | } |
586 | |
587 | return ret; |
588 | } |
589 | |
590 | static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, |
591 | struct mac_device_info *hw) |
592 | { |
593 | void __iomem *ioaddr = hw->pcsr; |
594 | u32 value; |
595 | u32 hash; |
596 | u32 val; |
597 | int i; |
598 | |
599 | /* Single Rx VLAN Filter */ |
600 | if (hw->num_vlan == 1) { |
601 | dwmac4_write_single_vlan(dev, vid: hw->vlan_filter[0]); |
602 | return; |
603 | } |
604 | |
605 | /* Extended Rx VLAN Filter Enable */ |
606 | for (i = 0; i < hw->num_vlan; i++) { |
607 | if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { |
608 | val = hw->vlan_filter[i]; |
609 | dwmac4_write_vlan_filter(dev, hw, index: i, data: val); |
610 | } |
611 | } |
612 | |
613 | hash = readl(addr: ioaddr + GMAC_VLAN_HASH_TABLE); |
614 | if (hash & GMAC_VLAN_VLHT) { |
615 | value = readl(addr: ioaddr + GMAC_VLAN_TAG); |
616 | value |= GMAC_VLAN_VTHM; |
617 | writel(val: value, addr: ioaddr + GMAC_VLAN_TAG); |
618 | } |
619 | } |
620 | |
621 | static void dwmac4_set_filter(struct mac_device_info *hw, |
622 | struct net_device *dev) |
623 | { |
624 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; |
625 | int numhashregs = (hw->multicast_filter_bins >> 5); |
626 | int mcbitslog2 = hw->mcast_bits_log2; |
627 | unsigned int value; |
628 | u32 mc_filter[8]; |
629 | int i; |
630 | |
631 | memset(mc_filter, 0, sizeof(mc_filter)); |
632 | |
633 | value = readl(addr: ioaddr + GMAC_PACKET_FILTER); |
634 | value &= ~GMAC_PACKET_FILTER_HMC; |
635 | value &= ~GMAC_PACKET_FILTER_HPF; |
636 | value &= ~GMAC_PACKET_FILTER_PCF; |
637 | value &= ~GMAC_PACKET_FILTER_PM; |
638 | value &= ~GMAC_PACKET_FILTER_PR; |
639 | value &= ~GMAC_PACKET_FILTER_RA; |
640 | if (dev->flags & IFF_PROMISC) { |
641 | /* VLAN Tag Filter Fail Packets Queuing */ |
642 | if (hw->vlan_fail_q_en) { |
643 | value = readl(addr: ioaddr + GMAC_RXQ_CTRL4); |
644 | value &= ~GMAC_RXQCTRL_VFFQ_MASK; |
645 | value |= GMAC_RXQCTRL_VFFQE | |
646 | (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT); |
647 | writel(val: value, addr: ioaddr + GMAC_RXQ_CTRL4); |
648 | value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA; |
649 | } else { |
650 | value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF; |
651 | } |
652 | |
653 | } else if ((dev->flags & IFF_ALLMULTI) || |
654 | (netdev_mc_count(dev) > hw->multicast_filter_bins)) { |
655 | /* Pass all multi */ |
656 | value |= GMAC_PACKET_FILTER_PM; |
657 | /* Set all the bits of the HASH tab */ |
658 | memset(mc_filter, 0xff, sizeof(mc_filter)); |
659 | } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { |
660 | struct netdev_hw_addr *ha; |
661 | |
662 | /* Hash filter for multicast */ |
663 | value |= GMAC_PACKET_FILTER_HMC; |
664 | |
665 | netdev_for_each_mc_addr(ha, dev) { |
666 | /* The upper n bits of the calculated CRC are used to |
667 | * index the contents of the hash table. The number of |
668 | * bits used depends on the hardware configuration |
669 | * selected at core configuration time. |
670 | */ |
671 | u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr, |
672 | ETH_ALEN)) >> (32 - mcbitslog2); |
673 | /* The most significant bit determines the register to |
674 | * use (H/L) while the other 5 bits determine the bit |
675 | * within the register. |
676 | */ |
677 | mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f)); |
678 | } |
679 | } |
680 | |
681 | for (i = 0; i < numhashregs; i++) |
682 | writel(val: mc_filter[i], addr: ioaddr + GMAC_HASH_TAB(i)); |
683 | |
684 | value |= GMAC_PACKET_FILTER_HPF; |
685 | |
686 | /* Handle multiple unicast addresses */ |
687 | if (netdev_uc_count(dev) > hw->unicast_filter_entries) { |
688 | /* Switch to promiscuous mode if more than 128 addrs |
689 | * are required |
690 | */ |
691 | value |= GMAC_PACKET_FILTER_PR; |
692 | } else { |
693 | struct netdev_hw_addr *ha; |
694 | int reg = 1; |
695 | |
696 | netdev_for_each_uc_addr(ha, dev) { |
697 | dwmac4_set_umac_addr(hw, addr: ha->addr, reg_n: reg); |
698 | reg++; |
699 | } |
700 | |
701 | while (reg < GMAC_MAX_PERFECT_ADDRESSES) { |
702 | writel(val: 0, addr: ioaddr + GMAC_ADDR_HIGH(reg)); |
703 | writel(val: 0, addr: ioaddr + GMAC_ADDR_LOW(reg)); |
704 | reg++; |
705 | } |
706 | } |
707 | |
708 | /* VLAN filtering */ |
709 | if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) |
710 | value &= ~GMAC_PACKET_FILTER_VTFE; |
711 | else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
712 | value |= GMAC_PACKET_FILTER_VTFE; |
713 | |
714 | writel(val: value, addr: ioaddr + GMAC_PACKET_FILTER); |
715 | } |
716 | |
717 | static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, |
718 | unsigned int fc, unsigned int pause_time, |
719 | u32 tx_cnt) |
720 | { |
721 | void __iomem *ioaddr = hw->pcsr; |
722 | unsigned int flow = 0; |
723 | u32 queue = 0; |
724 | |
725 | pr_debug("GMAC Flow-Control:\n" ); |
726 | if (fc & FLOW_RX) { |
727 | pr_debug("\tReceive Flow-Control ON\n" ); |
728 | flow |= GMAC_RX_FLOW_CTRL_RFE; |
729 | } else { |
730 | pr_debug("\tReceive Flow-Control OFF\n" ); |
731 | } |
732 | writel(val: flow, addr: ioaddr + GMAC_RX_FLOW_CTRL); |
733 | |
734 | if (fc & FLOW_TX) { |
735 | pr_debug("\tTransmit Flow-Control ON\n" ); |
736 | |
737 | if (duplex) |
738 | pr_debug("\tduplex mode: PAUSE %d\n" , pause_time); |
739 | |
740 | for (queue = 0; queue < tx_cnt; queue++) { |
741 | flow = GMAC_TX_FLOW_CTRL_TFE; |
742 | |
743 | if (duplex) |
744 | flow |= |
745 | (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); |
746 | |
747 | writel(val: flow, addr: ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); |
748 | } |
749 | } else { |
750 | for (queue = 0; queue < tx_cnt; queue++) |
751 | writel(val: 0, addr: ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); |
752 | } |
753 | } |
754 | |
/* dwmac4_ctrl_ane - PCS autoneg control; thin wrapper adding the GMAC4
 * PCS register base to the common dwmac helper.
 */
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
760 | |
/* dwmac4_rane - restart PCS autonegotiation (wrapper over dwmac_rane). */
static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}
765 | |
766 | static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) |
767 | { |
768 | dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv_lp: adv); |
769 | } |
770 | |
771 | /* RGMII or SMII interface */ |
772 | static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) |
773 | { |
774 | u32 status; |
775 | |
776 | status = readl(addr: ioaddr + GMAC_PHYIF_CONTROL_STATUS); |
777 | x->irq_rgmii_n++; |
778 | |
779 | /* Check the link status */ |
780 | if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { |
781 | int speed_value; |
782 | |
783 | x->pcs_link = 1; |
784 | |
785 | speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> |
786 | GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); |
787 | if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) |
788 | x->pcs_speed = SPEED_1000; |
789 | else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) |
790 | x->pcs_speed = SPEED_100; |
791 | else |
792 | x->pcs_speed = SPEED_10; |
793 | |
794 | x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); |
795 | |
796 | pr_info("Link is Up - %d/%s\n" , (int)x->pcs_speed, |
797 | x->pcs_duplex ? "Full" : "Half" ); |
798 | } else { |
799 | x->pcs_link = 0; |
800 | pr_info("Link is Down\n" ); |
801 | } |
802 | } |
803 | |
804 | static int dwmac4_irq_mtl_status(struct stmmac_priv *priv, |
805 | struct mac_device_info *hw, u32 chan) |
806 | { |
807 | const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; |
808 | void __iomem *ioaddr = hw->pcsr; |
809 | u32 mtl_int_qx_status; |
810 | int ret = 0; |
811 | |
812 | mtl_int_qx_status = readl(addr: ioaddr + MTL_INT_STATUS); |
813 | |
814 | /* Check MTL Interrupt */ |
815 | if (mtl_int_qx_status & MTL_INT_QX(chan)) { |
816 | /* read Queue x Interrupt status */ |
817 | u32 status = readl(addr: ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, |
818 | chan)); |
819 | |
820 | if (status & MTL_RX_OVERFLOW_INT) { |
821 | /* clear Interrupt */ |
822 | writel(val: status | MTL_RX_OVERFLOW_INT, |
823 | addr: ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan)); |
824 | ret = CORE_IRQ_MTL_RX_OVERFLOW; |
825 | } |
826 | } |
827 | |
828 | return ret; |
829 | } |
830 | |
831 | static int dwmac4_irq_status(struct mac_device_info *hw, |
832 | struct stmmac_extra_stats *x) |
833 | { |
834 | void __iomem *ioaddr = hw->pcsr; |
835 | u32 intr_status = readl(addr: ioaddr + GMAC_INT_STATUS); |
836 | u32 intr_enable = readl(addr: ioaddr + GMAC_INT_EN); |
837 | int ret = 0; |
838 | |
839 | /* Discard disabled bits */ |
840 | intr_status &= intr_enable; |
841 | |
842 | /* Not used events (e.g. MMC interrupts) are not handled. */ |
843 | if ((intr_status & mmc_tx_irq)) |
844 | x->mmc_tx_irq_n++; |
845 | if (unlikely(intr_status & mmc_rx_irq)) |
846 | x->mmc_rx_irq_n++; |
847 | if (unlikely(intr_status & mmc_rx_csum_offload_irq)) |
848 | x->mmc_rx_csum_offload_irq_n++; |
849 | /* Clear the PMT bits 5 and 6 by reading the PMT status reg */ |
850 | if (unlikely(intr_status & pmt_irq)) { |
851 | readl(addr: ioaddr + GMAC_PMT); |
852 | x->irq_receive_pmt_irq_n++; |
853 | } |
854 | |
855 | /* MAC tx/rx EEE LPI entry/exit interrupts */ |
856 | if (intr_status & lpi_irq) { |
857 | /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ |
858 | u32 status = readl(addr: ioaddr + GMAC4_LPI_CTRL_STATUS); |
859 | |
860 | if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { |
861 | ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; |
862 | x->irq_tx_path_in_lpi_mode_n++; |
863 | } |
864 | if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { |
865 | ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; |
866 | x->irq_tx_path_exit_lpi_mode_n++; |
867 | } |
868 | if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) |
869 | x->irq_rx_path_in_lpi_mode_n++; |
870 | if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) |
871 | x->irq_rx_path_exit_lpi_mode_n++; |
872 | } |
873 | |
874 | dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); |
875 | if (intr_status & PCS_RGSMIIIS_IRQ) |
876 | dwmac4_phystatus(ioaddr, x); |
877 | |
878 | return ret; |
879 | } |
880 | |
881 | static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr, |
882 | struct stmmac_extra_stats *x, |
883 | u32 rx_queues, u32 tx_queues) |
884 | { |
885 | const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; |
886 | u32 value; |
887 | u32 queue; |
888 | |
889 | for (queue = 0; queue < tx_queues; queue++) { |
890 | value = readl(addr: ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue)); |
891 | |
892 | if (value & MTL_DEBUG_TXSTSFSTS) |
893 | x->mtl_tx_status_fifo_full++; |
894 | if (value & MTL_DEBUG_TXFSTS) |
895 | x->mtl_tx_fifo_not_empty++; |
896 | if (value & MTL_DEBUG_TWCSTS) |
897 | x->mmtl_fifo_ctrl++; |
898 | if (value & MTL_DEBUG_TRCSTS_MASK) { |
899 | u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) |
900 | >> MTL_DEBUG_TRCSTS_SHIFT; |
901 | if (trcsts == MTL_DEBUG_TRCSTS_WRITE) |
902 | x->mtl_tx_fifo_read_ctrl_write++; |
903 | else if (trcsts == MTL_DEBUG_TRCSTS_TXW) |
904 | x->mtl_tx_fifo_read_ctrl_wait++; |
905 | else if (trcsts == MTL_DEBUG_TRCSTS_READ) |
906 | x->mtl_tx_fifo_read_ctrl_read++; |
907 | else |
908 | x->mtl_tx_fifo_read_ctrl_idle++; |
909 | } |
910 | if (value & MTL_DEBUG_TXPAUSED) |
911 | x->mac_tx_in_pause++; |
912 | } |
913 | |
914 | for (queue = 0; queue < rx_queues; queue++) { |
915 | value = readl(addr: ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue)); |
916 | |
917 | if (value & MTL_DEBUG_RXFSTS_MASK) { |
918 | u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) |
919 | >> MTL_DEBUG_RRCSTS_SHIFT; |
920 | |
921 | if (rxfsts == MTL_DEBUG_RXFSTS_FULL) |
922 | x->mtl_rx_fifo_fill_level_full++; |
923 | else if (rxfsts == MTL_DEBUG_RXFSTS_AT) |
924 | x->mtl_rx_fifo_fill_above_thresh++; |
925 | else if (rxfsts == MTL_DEBUG_RXFSTS_BT) |
926 | x->mtl_rx_fifo_fill_below_thresh++; |
927 | else |
928 | x->mtl_rx_fifo_fill_level_empty++; |
929 | } |
930 | if (value & MTL_DEBUG_RRCSTS_MASK) { |
931 | u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> |
932 | MTL_DEBUG_RRCSTS_SHIFT; |
933 | |
934 | if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) |
935 | x->mtl_rx_fifo_read_ctrl_flush++; |
936 | else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) |
937 | x->mtl_rx_fifo_read_ctrl_read_data++; |
938 | else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) |
939 | x->mtl_rx_fifo_read_ctrl_status++; |
940 | else |
941 | x->mtl_rx_fifo_read_ctrl_idle++; |
942 | } |
943 | if (value & MTL_DEBUG_RWCSTS) |
944 | x->mtl_rx_fifo_ctrl_active++; |
945 | } |
946 | |
947 | /* GMAC debug */ |
948 | value = readl(addr: ioaddr + GMAC_DEBUG); |
949 | |
950 | if (value & GMAC_DEBUG_TFCSTS_MASK) { |
951 | u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) |
952 | >> GMAC_DEBUG_TFCSTS_SHIFT; |
953 | |
954 | if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) |
955 | x->mac_tx_frame_ctrl_xfer++; |
956 | else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) |
957 | x->mac_tx_frame_ctrl_pause++; |
958 | else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) |
959 | x->mac_tx_frame_ctrl_wait++; |
960 | else |
961 | x->mac_tx_frame_ctrl_idle++; |
962 | } |
963 | if (value & GMAC_DEBUG_TPESTS) |
964 | x->mac_gmii_tx_proto_engine++; |
965 | if (value & GMAC_DEBUG_RFCFCSTS_MASK) |
966 | x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) |
967 | >> GMAC_DEBUG_RFCFCSTS_SHIFT; |
968 | if (value & GMAC_DEBUG_RPESTS) |
969 | x->mac_gmii_rx_proto_engine++; |
970 | } |
971 | |
972 | static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) |
973 | { |
974 | u32 value = readl(addr: ioaddr + GMAC_CONFIG); |
975 | |
976 | if (enable) |
977 | value |= GMAC_CONFIG_LM; |
978 | else |
979 | value &= ~GMAC_CONFIG_LM; |
980 | |
981 | writel(val: value, addr: ioaddr + GMAC_CONFIG); |
982 | } |
983 | |
984 | static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, |
985 | __le16 perfect_match, bool is_double) |
986 | { |
987 | void __iomem *ioaddr = hw->pcsr; |
988 | u32 value; |
989 | |
990 | writel(val: hash, addr: ioaddr + GMAC_VLAN_HASH_TABLE); |
991 | |
992 | value = readl(addr: ioaddr + GMAC_VLAN_TAG); |
993 | |
994 | if (hash) { |
995 | value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; |
996 | if (is_double) { |
997 | value |= GMAC_VLAN_EDVLP; |
998 | value |= GMAC_VLAN_ESVL; |
999 | value |= GMAC_VLAN_DOVLTC; |
1000 | } |
1001 | |
1002 | writel(val: value, addr: ioaddr + GMAC_VLAN_TAG); |
1003 | } else if (perfect_match) { |
1004 | u32 value = GMAC_VLAN_ETV; |
1005 | |
1006 | if (is_double) { |
1007 | value |= GMAC_VLAN_EDVLP; |
1008 | value |= GMAC_VLAN_ESVL; |
1009 | value |= GMAC_VLAN_DOVLTC; |
1010 | } |
1011 | |
1012 | writel(val: value | perfect_match, addr: ioaddr + GMAC_VLAN_TAG); |
1013 | } else { |
1014 | value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); |
1015 | value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); |
1016 | value &= ~GMAC_VLAN_DOVLTC; |
1017 | value &= ~GMAC_VLAN_VID; |
1018 | |
1019 | writel(val: value, addr: ioaddr + GMAC_VLAN_TAG); |
1020 | } |
1021 | } |
1022 | |
1023 | static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) |
1024 | { |
1025 | u32 value = readl(addr: ioaddr + GMAC_CONFIG); |
1026 | |
1027 | value &= ~GMAC_CONFIG_SARC; |
1028 | value |= val << GMAC_CONFIG_SARC_SHIFT; |
1029 | |
1030 | writel(val: value, addr: ioaddr + GMAC_CONFIG); |
1031 | } |
1032 | |
1033 | static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type) |
1034 | { |
1035 | void __iomem *ioaddr = hw->pcsr; |
1036 | u32 value; |
1037 | |
1038 | value = readl(addr: ioaddr + GMAC_VLAN_INCL); |
1039 | value |= GMAC_VLAN_VLTI; |
1040 | value |= GMAC_VLAN_CSVL; /* Only use SVLAN */ |
1041 | value &= ~GMAC_VLAN_VLC; |
1042 | value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC; |
1043 | writel(val: value, addr: ioaddr + GMAC_VLAN_INCL); |
1044 | } |
1045 | |
1046 | static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, |
1047 | u32 addr) |
1048 | { |
1049 | void __iomem *ioaddr = hw->pcsr; |
1050 | u32 value; |
1051 | |
1052 | writel(val: addr, addr: ioaddr + GMAC_ARP_ADDR); |
1053 | |
1054 | value = readl(addr: ioaddr + GMAC_CONFIG); |
1055 | if (en) |
1056 | value |= GMAC_CONFIG_ARPEN; |
1057 | else |
1058 | value &= ~GMAC_CONFIG_ARPEN; |
1059 | writel(val: value, addr: ioaddr + GMAC_CONFIG); |
1060 | } |
1061 | |
1062 | static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no, |
1063 | bool en, bool ipv6, bool sa, bool inv, |
1064 | u32 match) |
1065 | { |
1066 | void __iomem *ioaddr = hw->pcsr; |
1067 | u32 value; |
1068 | |
1069 | value = readl(addr: ioaddr + GMAC_PACKET_FILTER); |
1070 | value |= GMAC_PACKET_FILTER_IPFE; |
1071 | writel(val: value, addr: ioaddr + GMAC_PACKET_FILTER); |
1072 | |
1073 | value = readl(addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1074 | |
1075 | /* For IPv6 not both SA/DA filters can be active */ |
1076 | if (ipv6) { |
1077 | value |= GMAC_L3PEN0; |
1078 | value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0); |
1079 | value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0); |
1080 | if (sa) { |
1081 | value |= GMAC_L3SAM0; |
1082 | if (inv) |
1083 | value |= GMAC_L3SAIM0; |
1084 | } else { |
1085 | value |= GMAC_L3DAM0; |
1086 | if (inv) |
1087 | value |= GMAC_L3DAIM0; |
1088 | } |
1089 | } else { |
1090 | value &= ~GMAC_L3PEN0; |
1091 | if (sa) { |
1092 | value |= GMAC_L3SAM0; |
1093 | if (inv) |
1094 | value |= GMAC_L3SAIM0; |
1095 | } else { |
1096 | value |= GMAC_L3DAM0; |
1097 | if (inv) |
1098 | value |= GMAC_L3DAIM0; |
1099 | } |
1100 | } |
1101 | |
1102 | writel(val: value, addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1103 | |
1104 | if (sa) { |
1105 | writel(val: match, addr: ioaddr + GMAC_L3_ADDR0(filter_no)); |
1106 | } else { |
1107 | writel(val: match, addr: ioaddr + GMAC_L3_ADDR1(filter_no)); |
1108 | } |
1109 | |
1110 | if (!en) |
1111 | writel(val: 0, addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1112 | |
1113 | return 0; |
1114 | } |
1115 | |
1116 | static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, |
1117 | bool en, bool udp, bool sa, bool inv, |
1118 | u32 match) |
1119 | { |
1120 | void __iomem *ioaddr = hw->pcsr; |
1121 | u32 value; |
1122 | |
1123 | value = readl(addr: ioaddr + GMAC_PACKET_FILTER); |
1124 | value |= GMAC_PACKET_FILTER_IPFE; |
1125 | writel(val: value, addr: ioaddr + GMAC_PACKET_FILTER); |
1126 | |
1127 | value = readl(addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1128 | if (udp) { |
1129 | value |= GMAC_L4PEN0; |
1130 | } else { |
1131 | value &= ~GMAC_L4PEN0; |
1132 | } |
1133 | |
1134 | value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0); |
1135 | value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0); |
1136 | if (sa) { |
1137 | value |= GMAC_L4SPM0; |
1138 | if (inv) |
1139 | value |= GMAC_L4SPIM0; |
1140 | } else { |
1141 | value |= GMAC_L4DPM0; |
1142 | if (inv) |
1143 | value |= GMAC_L4DPIM0; |
1144 | } |
1145 | |
1146 | writel(val: value, addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1147 | |
1148 | if (sa) { |
1149 | value = match & GMAC_L4SP0; |
1150 | } else { |
1151 | value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0; |
1152 | } |
1153 | |
1154 | writel(val: value, addr: ioaddr + GMAC_L4_ADDR(filter_no)); |
1155 | |
1156 | if (!en) |
1157 | writel(val: 0, addr: ioaddr + GMAC_L3L4_CTRL(filter_no)); |
1158 | |
1159 | return 0; |
1160 | } |
1161 | |
1162 | static void dwmac4_rx_hw_vlan(struct mac_device_info *hw, |
1163 | struct dma_desc *rx_desc, struct sk_buff *skb) |
1164 | { |
1165 | if (hw->desc->get_rx_vlan_valid(rx_desc)) { |
1166 | u16 vid = hw->desc->get_rx_vlan_tci(rx_desc); |
1167 | |
1168 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci: vid); |
1169 | } |
1170 | } |
1171 | |
1172 | static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw) |
1173 | { |
1174 | void __iomem *ioaddr = hw->pcsr; |
1175 | u32 value = readl(addr: ioaddr + GMAC_VLAN_TAG); |
1176 | |
1177 | value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK; |
1178 | |
1179 | if (hw->hw_vlan_en) |
1180 | /* Always strip VLAN on Receive */ |
1181 | value |= GMAC_VLAN_TAG_STRIP_ALL; |
1182 | else |
1183 | /* Do not strip VLAN on Receive */ |
1184 | value |= GMAC_VLAN_TAG_STRIP_NONE; |
1185 | |
1186 | /* Enable outer VLAN Tag in Rx DMA descriptor */ |
1187 | value |= GMAC_VLAN_TAG_CTRL_EVLRXS; |
1188 | writel(val: value, addr: ioaddr + GMAC_VLAN_TAG); |
1189 | } |
1190 | |
/* MAC callbacks for the plain DWMAC 4.00 core: uses the generic
 * stmmac_set_mac and no dwmac5-specific features (safety, flex PPS, FPE).
 */
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
1235 | |
/* MAC callbacks for DWMAC 4.10: differs from dwmac4_ops by using
 * stmmac_dwmac4_set_mac and adding dwmac5 flexible PPS and FPE support.
 */
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
1284 | |
/* MAC callbacks for DWMAC 5.10: extends the 4.10 set with the dwmac5
 * safety features (ECC/parity reporting) and the RX parser (rxp_config).
 */
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
	.rx_hw_vlan = dwmac4_rx_hw_vlan,
	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
1337 | |
1338 | static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) |
1339 | { |
1340 | u32 val, num_vlan; |
1341 | |
1342 | val = readl(addr: ioaddr + GMAC_HW_FEATURE3); |
1343 | switch (val & GMAC_HW_FEAT_NRVF) { |
1344 | case 0: |
1345 | num_vlan = 1; |
1346 | break; |
1347 | case 1: |
1348 | num_vlan = 4; |
1349 | break; |
1350 | case 2: |
1351 | num_vlan = 8; |
1352 | break; |
1353 | case 3: |
1354 | num_vlan = 16; |
1355 | break; |
1356 | case 4: |
1357 | num_vlan = 24; |
1358 | break; |
1359 | case 5: |
1360 | num_vlan = 32; |
1361 | break; |
1362 | default: |
1363 | num_vlan = 1; |
1364 | } |
1365 | |
1366 | return num_vlan; |
1367 | } |
1368 | |
/* dwmac4_setup - fill in the mac_device_info for a DWMAC4/5 core.
 * @priv: driver private state (provides ioaddr and platform data)
 *
 * Populates register offsets, link speed/duplex config bits, MDIO
 * field layout and filtering capabilities. Always returns 0.
 */
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n" );

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	/* Filter sizes come from platform data; bins may be 0 */
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	/* GMAC_CONFIG bit patterns selecting each speed; speed_mask
	 * covers the bits touched when changing speed.
	 */
	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	/* MDIO register layout: PA in bits 25:21, RDA in 20:16,
	 * CSR clock range in 11:8.
	 */
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(ioaddr: priv->ioaddr);

	return 0;
}
1404 | |