1 | /* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver |
2 | * |
3 | * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) |
4 | * |
5 | * This program is dual-licensed; you may select either version 2 of |
6 | * the GNU General Public License ("GPL") or BSD license ("BSD"). |
7 | * |
8 | * This Synopsys DWC XLGMAC software driver and associated documentation |
9 | * (hereinafter the "Software") is an unsupported proprietary work of |
10 | * Synopsys, Inc. unless otherwise expressly agreed to in writing between |
11 | * Synopsys and you. The Software IS NOT an item of Licensed Software or a |
12 | * Licensed Product under any End User Software License Agreement or |
13 | * Agreement for Licensed Products with Synopsys or any supplement thereto. |
14 | * Synopsys is a registered trademark of Synopsys, Inc. Other names included |
15 | * in the SOFTWARE may be the trademarks of their respective owners. |
16 | */ |
17 | |
18 | #include <linux/phy.h> |
19 | #include <linux/mdio.h> |
20 | #include <linux/clk.h> |
21 | #include <linux/bitrev.h> |
22 | #include <linux/crc32.h> |
23 | #include <linux/crc32poly.h> |
24 | #include <linux/dcbnl.h> |
25 | |
26 | #include "dwc-xlgmac.h" |
27 | #include "dwc-xlgmac-reg.h" |
28 | |
29 | static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) |
30 | { |
31 | return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
32 | TX_NORMAL_DESC3_OWN_POS, |
33 | TX_NORMAL_DESC3_OWN_LEN); |
34 | } |
35 | |
36 | static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) |
37 | { |
38 | u32 regval; |
39 | |
40 | regval = readl(addr: pdata->mac_regs + MAC_RCR); |
41 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, |
42 | MAC_RCR_IPC_LEN, 0); |
43 | writel(val: regval, addr: pdata->mac_regs + MAC_RCR); |
44 | |
45 | return 0; |
46 | } |
47 | |
48 | static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) |
49 | { |
50 | u32 regval; |
51 | |
52 | regval = readl(addr: pdata->mac_regs + MAC_RCR); |
53 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, |
54 | MAC_RCR_IPC_LEN, 1); |
55 | writel(val: regval, addr: pdata->mac_regs + MAC_RCR); |
56 | |
57 | return 0; |
58 | } |
59 | |
60 | static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr) |
61 | { |
62 | unsigned int mac_addr_hi, mac_addr_lo; |
63 | |
64 | mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); |
65 | mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | |
66 | (addr[1] << 8) | (addr[0] << 0); |
67 | |
68 | writel(val: mac_addr_hi, addr: pdata->mac_regs + MAC_MACA0HR); |
69 | writel(val: mac_addr_lo, addr: pdata->mac_regs + MAC_MACA0LR); |
70 | |
71 | return 0; |
72 | } |
73 | |
74 | static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata, |
75 | struct netdev_hw_addr *ha, |
76 | unsigned int *mac_reg) |
77 | { |
78 | unsigned int mac_addr_hi, mac_addr_lo; |
79 | u8 *mac_addr; |
80 | |
81 | mac_addr_lo = 0; |
82 | mac_addr_hi = 0; |
83 | |
84 | if (ha) { |
85 | mac_addr = (u8 *)&mac_addr_lo; |
86 | mac_addr[0] = ha->addr[0]; |
87 | mac_addr[1] = ha->addr[1]; |
88 | mac_addr[2] = ha->addr[2]; |
89 | mac_addr[3] = ha->addr[3]; |
90 | mac_addr = (u8 *)&mac_addr_hi; |
91 | mac_addr[0] = ha->addr[4]; |
92 | mac_addr[1] = ha->addr[5]; |
93 | |
94 | netif_dbg(pdata, drv, pdata->netdev, |
95 | "adding mac address %pM at %#x\n" , |
96 | ha->addr, *mac_reg); |
97 | |
98 | mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi, |
99 | MAC_MACA1HR_AE_POS, |
100 | MAC_MACA1HR_AE_LEN, |
101 | 1); |
102 | } |
103 | |
104 | writel(val: mac_addr_hi, addr: pdata->mac_regs + *mac_reg); |
105 | *mac_reg += MAC_MACA_INC; |
106 | writel(val: mac_addr_lo, addr: pdata->mac_regs + *mac_reg); |
107 | *mac_reg += MAC_MACA_INC; |
108 | } |
109 | |
110 | static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata) |
111 | { |
112 | u32 regval; |
113 | |
114 | regval = readl(addr: pdata->mac_regs + MAC_VLANTR); |
115 | /* Put the VLAN tag in the Rx descriptor */ |
116 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS, |
117 | MAC_VLANTR_EVLRXS_LEN, 1); |
118 | /* Don't check the VLAN type */ |
119 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS, |
120 | MAC_VLANTR_DOVLTC_LEN, 1); |
121 | /* Check only C-TAG (0x8100) packets */ |
122 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS, |
123 | MAC_VLANTR_ERSVLM_LEN, 0); |
124 | /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ |
125 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS, |
126 | MAC_VLANTR_ESVL_LEN, 0); |
127 | /* Enable VLAN tag stripping */ |
128 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, |
129 | MAC_VLANTR_EVLS_LEN, 0x3); |
130 | writel(val: regval, addr: pdata->mac_regs + MAC_VLANTR); |
131 | |
132 | return 0; |
133 | } |
134 | |
135 | static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) |
136 | { |
137 | u32 regval; |
138 | |
139 | regval = readl(addr: pdata->mac_regs + MAC_VLANTR); |
140 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, |
141 | MAC_VLANTR_EVLS_LEN, 0); |
142 | writel(val: regval, addr: pdata->mac_regs + MAC_VLANTR); |
143 | |
144 | return 0; |
145 | } |
146 | |
147 | static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata) |
148 | { |
149 | u32 regval; |
150 | |
151 | regval = readl(addr: pdata->mac_regs + MAC_PFR); |
152 | /* Enable VLAN filtering */ |
153 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, |
154 | MAC_PFR_VTFE_LEN, 1); |
155 | writel(val: regval, addr: pdata->mac_regs + MAC_PFR); |
156 | |
157 | regval = readl(addr: pdata->mac_regs + MAC_VLANTR); |
158 | /* Enable VLAN Hash Table filtering */ |
159 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS, |
160 | MAC_VLANTR_VTHM_LEN, 1); |
161 | /* Disable VLAN tag inverse matching */ |
162 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS, |
163 | MAC_VLANTR_VTIM_LEN, 0); |
164 | /* Only filter on the lower 12-bits of the VLAN tag */ |
165 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS, |
166 | MAC_VLANTR_ETV_LEN, 1); |
167 | /* In order for the VLAN Hash Table filtering to be effective, |
168 | * the VLAN tag identifier in the VLAN Tag Register must not |
169 | * be zero. Set the VLAN tag identifier to "1" to enable the |
170 | * VLAN Hash Table filtering. This implies that a VLAN tag of |
171 | * 1 will always pass filtering. |
172 | */ |
173 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, |
174 | MAC_VLANTR_VL_LEN, 1); |
175 | writel(val: regval, addr: pdata->mac_regs + MAC_VLANTR); |
176 | |
177 | return 0; |
178 | } |
179 | |
180 | static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) |
181 | { |
182 | u32 regval; |
183 | |
184 | regval = readl(addr: pdata->mac_regs + MAC_PFR); |
185 | /* Disable VLAN filtering */ |
186 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, |
187 | MAC_PFR_VTFE_LEN, 0); |
188 | writel(val: regval, addr: pdata->mac_regs + MAC_PFR); |
189 | |
190 | return 0; |
191 | } |
192 | |
193 | static u32 xlgmac_vid_crc32_le(__le16 vid_le) |
194 | { |
195 | unsigned char *data = (unsigned char *)&vid_le; |
196 | unsigned char data_byte = 0; |
197 | u32 crc = ~0; |
198 | u32 temp = 0; |
199 | int i, bits; |
200 | |
201 | bits = get_bitmask_order(VLAN_VID_MASK); |
202 | for (i = 0; i < bits; i++) { |
203 | if ((i % 8) == 0) |
204 | data_byte = data[i / 8]; |
205 | |
206 | temp = ((crc & 1) ^ data_byte) & 1; |
207 | crc >>= 1; |
208 | data_byte >>= 1; |
209 | |
210 | if (temp) |
211 | crc ^= CRC32_POLY_LE; |
212 | } |
213 | |
214 | return crc; |
215 | } |
216 | |
217 | static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata) |
218 | { |
219 | u16 vlan_hash_table = 0; |
220 | __le16 vid_le; |
221 | u32 regval; |
222 | u32 crc; |
223 | u16 vid; |
224 | |
225 | /* Generate the VLAN Hash Table value */ |
226 | for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { |
227 | /* Get the CRC32 value of the VLAN ID */ |
228 | vid_le = cpu_to_le16(vid); |
229 | crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28; |
230 | |
231 | vlan_hash_table |= (1 << crc); |
232 | } |
233 | |
234 | regval = readl(addr: pdata->mac_regs + MAC_VLANHTR); |
235 | /* Set the VLAN Hash Table filtering register */ |
236 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS, |
237 | MAC_VLANHTR_VLHT_LEN, vlan_hash_table); |
238 | writel(val: regval, addr: pdata->mac_regs + MAC_VLANHTR); |
239 | |
240 | return 0; |
241 | } |
242 | |
243 | static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata, |
244 | unsigned int enable) |
245 | { |
246 | unsigned int val = enable ? 1 : 0; |
247 | u32 regval; |
248 | |
249 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), |
250 | MAC_PFR_PR_POS, MAC_PFR_PR_LEN); |
251 | if (regval == val) |
252 | return 0; |
253 | |
254 | netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n" , |
255 | enable ? "entering" : "leaving" ); |
256 | |
257 | regval = readl(addr: pdata->mac_regs + MAC_PFR); |
258 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, |
259 | MAC_PFR_PR_LEN, val); |
260 | writel(val: regval, addr: pdata->mac_regs + MAC_PFR); |
261 | |
262 | /* Hardware will still perform VLAN filtering in promiscuous mode */ |
263 | if (enable) { |
264 | xlgmac_disable_rx_vlan_filtering(pdata); |
265 | } else { |
266 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
267 | xlgmac_enable_rx_vlan_filtering(pdata); |
268 | } |
269 | |
270 | return 0; |
271 | } |
272 | |
273 | static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata, |
274 | unsigned int enable) |
275 | { |
276 | unsigned int val = enable ? 1 : 0; |
277 | u32 regval; |
278 | |
279 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), |
280 | MAC_PFR_PM_POS, MAC_PFR_PM_LEN); |
281 | if (regval == val) |
282 | return 0; |
283 | |
284 | netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n" , |
285 | enable ? "entering" : "leaving" ); |
286 | |
287 | regval = readl(addr: pdata->mac_regs + MAC_PFR); |
288 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, |
289 | MAC_PFR_PM_LEN, val); |
290 | writel(val: regval, addr: pdata->mac_regs + MAC_PFR); |
291 | |
292 | return 0; |
293 | } |
294 | |
295 | static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata) |
296 | { |
297 | struct net_device *netdev = pdata->netdev; |
298 | struct netdev_hw_addr *ha; |
299 | unsigned int addn_macs; |
300 | unsigned int mac_reg; |
301 | |
302 | mac_reg = MAC_MACA1HR; |
303 | addn_macs = pdata->hw_feat.addn_mac; |
304 | |
305 | if (netdev_uc_count(netdev) > addn_macs) { |
306 | xlgmac_set_promiscuous_mode(pdata, enable: 1); |
307 | } else { |
308 | netdev_for_each_uc_addr(ha, netdev) { |
309 | xlgmac_set_mac_reg(pdata, ha, mac_reg: &mac_reg); |
310 | addn_macs--; |
311 | } |
312 | |
313 | if (netdev_mc_count(netdev) > addn_macs) { |
314 | xlgmac_set_all_multicast_mode(pdata, enable: 1); |
315 | } else { |
316 | netdev_for_each_mc_addr(ha, netdev) { |
317 | xlgmac_set_mac_reg(pdata, ha, mac_reg: &mac_reg); |
318 | addn_macs--; |
319 | } |
320 | } |
321 | } |
322 | |
323 | /* Clear remaining additional MAC address entries */ |
324 | while (addn_macs--) |
325 | xlgmac_set_mac_reg(pdata, NULL, mac_reg: &mac_reg); |
326 | } |
327 | |
328 | static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata) |
329 | { |
330 | unsigned int hash_table_shift, hash_table_count; |
331 | u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE]; |
332 | struct net_device *netdev = pdata->netdev; |
333 | struct netdev_hw_addr *ha; |
334 | unsigned int hash_reg; |
335 | unsigned int i; |
336 | u32 crc; |
337 | |
338 | hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); |
339 | hash_table_count = pdata->hw_feat.hash_table_size / 32; |
340 | memset(hash_table, 0, sizeof(hash_table)); |
341 | |
342 | /* Build the MAC Hash Table register values */ |
343 | netdev_for_each_uc_addr(ha, netdev) { |
344 | crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); |
345 | crc >>= hash_table_shift; |
346 | hash_table[crc >> 5] |= (1 << (crc & 0x1f)); |
347 | } |
348 | |
349 | netdev_for_each_mc_addr(ha, netdev) { |
350 | crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); |
351 | crc >>= hash_table_shift; |
352 | hash_table[crc >> 5] |= (1 << (crc & 0x1f)); |
353 | } |
354 | |
355 | /* Set the MAC Hash Table registers */ |
356 | hash_reg = MAC_HTR0; |
357 | for (i = 0; i < hash_table_count; i++) { |
358 | writel(val: hash_table[i], addr: pdata->mac_regs + hash_reg); |
359 | hash_reg += MAC_HTR_INC; |
360 | } |
361 | } |
362 | |
363 | static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) |
364 | { |
365 | if (pdata->hw_feat.hash_table_size) |
366 | xlgmac_set_mac_hash_table(pdata); |
367 | else |
368 | xlgmac_set_mac_addn_addrs(pdata); |
369 | |
370 | return 0; |
371 | } |
372 | |
373 | static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata) |
374 | { |
375 | u32 regval; |
376 | |
377 | xlgmac_set_mac_address(pdata, addr: pdata->netdev->dev_addr); |
378 | |
379 | /* Filtering is done using perfect filtering and hash filtering */ |
380 | if (pdata->hw_feat.hash_table_size) { |
381 | regval = readl(addr: pdata->mac_regs + MAC_PFR); |
382 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, |
383 | MAC_PFR_HPF_LEN, 1); |
384 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, |
385 | MAC_PFR_HUC_LEN, 1); |
386 | regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS, |
387 | MAC_PFR_HMC_LEN, 1); |
388 | writel(val: regval, addr: pdata->mac_regs + MAC_PFR); |
389 | } |
390 | } |
391 | |
392 | static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) |
393 | { |
394 | unsigned int val; |
395 | u32 regval; |
396 | |
397 | val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0; |
398 | |
399 | regval = readl(addr: pdata->mac_regs + MAC_RCR); |
400 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, |
401 | MAC_RCR_JE_LEN, val); |
402 | writel(val: regval, addr: pdata->mac_regs + MAC_RCR); |
403 | } |
404 | |
405 | static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) |
406 | { |
407 | if (pdata->netdev->features & NETIF_F_RXCSUM) |
408 | xlgmac_enable_rx_csum(pdata); |
409 | else |
410 | xlgmac_disable_rx_csum(pdata); |
411 | } |
412 | |
413 | static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata) |
414 | { |
415 | u32 regval; |
416 | |
417 | regval = readl(addr: pdata->mac_regs + MAC_VLANIR); |
418 | /* Indicate that VLAN Tx CTAGs come from context descriptors */ |
419 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, |
420 | MAC_VLANIR_CSVL_LEN, 0); |
421 | regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, |
422 | MAC_VLANIR_VLTI_LEN, 1); |
423 | writel(val: regval, addr: pdata->mac_regs + MAC_VLANIR); |
424 | |
425 | /* Set the current VLAN Hash Table register value */ |
426 | xlgmac_update_vlan_hash_table(pdata); |
427 | |
428 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
429 | xlgmac_enable_rx_vlan_filtering(pdata); |
430 | else |
431 | xlgmac_disable_rx_vlan_filtering(pdata); |
432 | |
433 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) |
434 | xlgmac_enable_rx_vlan_stripping(pdata); |
435 | else |
436 | xlgmac_disable_rx_vlan_stripping(pdata); |
437 | } |
438 | |
439 | static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) |
440 | { |
441 | struct net_device *netdev = pdata->netdev; |
442 | unsigned int pr_mode, am_mode; |
443 | |
444 | pr_mode = ((netdev->flags & IFF_PROMISC) != 0); |
445 | am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); |
446 | |
447 | xlgmac_set_promiscuous_mode(pdata, enable: pr_mode); |
448 | xlgmac_set_all_multicast_mode(pdata, enable: am_mode); |
449 | |
450 | xlgmac_add_mac_addresses(pdata); |
451 | |
452 | return 0; |
453 | } |
454 | |
455 | static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata, |
456 | struct xlgmac_channel *channel) |
457 | { |
458 | unsigned int tx_dsr, tx_pos, tx_qidx; |
459 | unsigned long tx_timeout; |
460 | unsigned int tx_status; |
461 | |
462 | /* Calculate the status register to read and the position within */ |
463 | if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { |
464 | tx_dsr = DMA_DSR0; |
465 | tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + |
466 | DMA_DSR0_TPS_START; |
467 | } else { |
468 | tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; |
469 | |
470 | tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); |
471 | tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + |
472 | DMA_DSRX_TPS_START; |
473 | } |
474 | |
475 | /* The Tx engine cannot be stopped if it is actively processing |
476 | * descriptors. Wait for the Tx engine to enter the stopped or |
477 | * suspended state. Don't wait forever though... |
478 | */ |
479 | tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); |
480 | while (time_before(jiffies, tx_timeout)) { |
481 | tx_status = readl(addr: pdata->mac_regs + tx_dsr); |
482 | tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos, |
483 | DMA_DSR_TPS_LEN); |
484 | if ((tx_status == DMA_TPS_STOPPED) || |
485 | (tx_status == DMA_TPS_SUSPENDED)) |
486 | break; |
487 | |
488 | usleep_range(min: 500, max: 1000); |
489 | } |
490 | |
491 | if (!time_before(jiffies, tx_timeout)) |
492 | netdev_info(dev: pdata->netdev, |
493 | format: "timed out waiting for Tx DMA channel %u to stop\n" , |
494 | channel->queue_index); |
495 | } |
496 | |
497 | static void xlgmac_enable_tx(struct xlgmac_pdata *pdata) |
498 | { |
499 | struct xlgmac_channel *channel; |
500 | unsigned int i; |
501 | u32 regval; |
502 | |
503 | /* Enable each Tx DMA channel */ |
504 | channel = pdata->channel_head; |
505 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
506 | if (!channel->tx_ring) |
507 | break; |
508 | |
509 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); |
510 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, |
511 | DMA_CH_TCR_ST_LEN, 1); |
512 | writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); |
513 | } |
514 | |
515 | /* Enable each Tx queue */ |
516 | for (i = 0; i < pdata->tx_q_count; i++) { |
517 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
518 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, |
519 | MTL_Q_TQOMR_TXQEN_LEN, |
520 | MTL_Q_ENABLED); |
521 | writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
522 | } |
523 | |
524 | /* Enable MAC Tx */ |
525 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
526 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, |
527 | MAC_TCR_TE_LEN, 1); |
528 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
529 | } |
530 | |
531 | static void xlgmac_disable_tx(struct xlgmac_pdata *pdata) |
532 | { |
533 | struct xlgmac_channel *channel; |
534 | unsigned int i; |
535 | u32 regval; |
536 | |
537 | /* Prepare for Tx DMA channel stop */ |
538 | channel = pdata->channel_head; |
539 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
540 | if (!channel->tx_ring) |
541 | break; |
542 | |
543 | xlgmac_prepare_tx_stop(pdata, channel); |
544 | } |
545 | |
546 | /* Disable MAC Tx */ |
547 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
548 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, |
549 | MAC_TCR_TE_LEN, 0); |
550 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
551 | |
552 | /* Disable each Tx queue */ |
553 | for (i = 0; i < pdata->tx_q_count; i++) { |
554 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
555 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, |
556 | MTL_Q_TQOMR_TXQEN_LEN, 0); |
557 | writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
558 | } |
559 | |
560 | /* Disable each Tx DMA channel */ |
561 | channel = pdata->channel_head; |
562 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
563 | if (!channel->tx_ring) |
564 | break; |
565 | |
566 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); |
567 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, |
568 | DMA_CH_TCR_ST_LEN, 0); |
569 | writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); |
570 | } |
571 | } |
572 | |
573 | static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata, |
574 | unsigned int queue) |
575 | { |
576 | unsigned int rx_status, prxq, rxqsts; |
577 | unsigned long rx_timeout; |
578 | |
579 | /* The Rx engine cannot be stopped if it is actively processing |
580 | * packets. Wait for the Rx queue to empty the Rx fifo. Don't |
581 | * wait forever though... |
582 | */ |
583 | rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); |
584 | while (time_before(jiffies, rx_timeout)) { |
585 | rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); |
586 | prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, |
587 | MTL_Q_RQDR_PRXQ_LEN); |
588 | rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, |
589 | MTL_Q_RQDR_RXQSTS_LEN); |
590 | if ((prxq == 0) && (rxqsts == 0)) |
591 | break; |
592 | |
593 | usleep_range(min: 500, max: 1000); |
594 | } |
595 | |
596 | if (!time_before(jiffies, rx_timeout)) |
597 | netdev_info(dev: pdata->netdev, |
598 | format: "timed out waiting for Rx queue %u to empty\n" , |
599 | queue); |
600 | } |
601 | |
602 | static void xlgmac_enable_rx(struct xlgmac_pdata *pdata) |
603 | { |
604 | struct xlgmac_channel *channel; |
605 | unsigned int regval, i; |
606 | |
607 | /* Enable each Rx DMA channel */ |
608 | channel = pdata->channel_head; |
609 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
610 | if (!channel->rx_ring) |
611 | break; |
612 | |
613 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); |
614 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, |
615 | DMA_CH_RCR_SR_LEN, 1); |
616 | writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); |
617 | } |
618 | |
619 | /* Enable each Rx queue */ |
620 | regval = 0; |
621 | for (i = 0; i < pdata->rx_q_count; i++) |
622 | regval |= (0x02 << (i << 1)); |
623 | writel(val: regval, addr: pdata->mac_regs + MAC_RQC0R); |
624 | |
625 | /* Enable MAC Rx */ |
626 | regval = readl(addr: pdata->mac_regs + MAC_RCR); |
627 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, |
628 | MAC_RCR_DCRCC_LEN, 1); |
629 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, |
630 | MAC_RCR_CST_LEN, 1); |
631 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, |
632 | MAC_RCR_ACS_LEN, 1); |
633 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, |
634 | MAC_RCR_RE_LEN, 1); |
635 | writel(val: regval, addr: pdata->mac_regs + MAC_RCR); |
636 | } |
637 | |
638 | static void xlgmac_disable_rx(struct xlgmac_pdata *pdata) |
639 | { |
640 | struct xlgmac_channel *channel; |
641 | unsigned int i; |
642 | u32 regval; |
643 | |
644 | /* Disable MAC Rx */ |
645 | regval = readl(addr: pdata->mac_regs + MAC_RCR); |
646 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, |
647 | MAC_RCR_DCRCC_LEN, 0); |
648 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, |
649 | MAC_RCR_CST_LEN, 0); |
650 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, |
651 | MAC_RCR_ACS_LEN, 0); |
652 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, |
653 | MAC_RCR_RE_LEN, 0); |
654 | writel(val: regval, addr: pdata->mac_regs + MAC_RCR); |
655 | |
656 | /* Prepare for Rx DMA channel stop */ |
657 | for (i = 0; i < pdata->rx_q_count; i++) |
658 | xlgmac_prepare_rx_stop(pdata, queue: i); |
659 | |
660 | /* Disable each Rx queue */ |
661 | writel(val: 0, addr: pdata->mac_regs + MAC_RQC0R); |
662 | |
663 | /* Disable each Rx DMA channel */ |
664 | channel = pdata->channel_head; |
665 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
666 | if (!channel->rx_ring) |
667 | break; |
668 | |
669 | regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); |
670 | regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, |
671 | DMA_CH_RCR_SR_LEN, 0); |
672 | writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); |
673 | } |
674 | } |
675 | |
676 | static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel, |
677 | struct xlgmac_ring *ring) |
678 | { |
679 | struct xlgmac_pdata *pdata = channel->pdata; |
680 | struct xlgmac_desc_data *desc_data; |
681 | |
682 | /* Make sure everything is written before the register write */ |
683 | wmb(); |
684 | |
685 | /* Issue a poll command to Tx DMA by writing address |
686 | * of next immediate free descriptor |
687 | */ |
688 | desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); |
689 | writel(lower_32_bits(desc_data->dma_desc_addr), |
690 | XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); |
691 | |
692 | /* Start the Tx timer */ |
693 | if (pdata->tx_usecs && !channel->tx_timer_active) { |
694 | channel->tx_timer_active = 1; |
695 | mod_timer(timer: &channel->tx_timer, |
696 | expires: jiffies + usecs_to_jiffies(u: pdata->tx_usecs)); |
697 | } |
698 | |
699 | ring->tx.xmit_more = 0; |
700 | } |
701 | |
702 | static void xlgmac_dev_xmit(struct xlgmac_channel *channel) |
703 | { |
704 | struct xlgmac_pdata *pdata = channel->pdata; |
705 | struct xlgmac_ring *ring = channel->tx_ring; |
706 | unsigned int tso_context, vlan_context; |
707 | struct xlgmac_desc_data *desc_data; |
708 | struct xlgmac_dma_desc *dma_desc; |
709 | struct xlgmac_pkt_info *pkt_info; |
710 | unsigned int csum, tso, vlan; |
711 | int start_index = ring->cur; |
712 | int cur_index = ring->cur; |
713 | unsigned int tx_set_ic; |
714 | int i; |
715 | |
716 | pkt_info = &ring->pkt_info; |
717 | csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, |
718 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, |
719 | TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); |
720 | tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, |
721 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, |
722 | TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); |
723 | vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, |
724 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, |
725 | TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); |
726 | |
727 | if (tso && (pkt_info->mss != ring->tx.cur_mss)) |
728 | tso_context = 1; |
729 | else |
730 | tso_context = 0; |
731 | |
732 | if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) |
733 | vlan_context = 1; |
734 | else |
735 | vlan_context = 0; |
736 | |
737 | /* Determine if an interrupt should be generated for this Tx: |
738 | * Interrupt: |
739 | * - Tx frame count exceeds the frame count setting |
740 | * - Addition of Tx frame count to the frame count since the |
741 | * last interrupt was set exceeds the frame count setting |
742 | * No interrupt: |
743 | * - No frame count setting specified (ethtool -C ethX tx-frames 0) |
744 | * - Addition of Tx frame count to the frame count since the |
745 | * last interrupt was set does not exceed the frame count setting |
746 | */ |
747 | ring->coalesce_count += pkt_info->tx_packets; |
748 | if (!pdata->tx_frames) |
749 | tx_set_ic = 0; |
750 | else if (pkt_info->tx_packets > pdata->tx_frames) |
751 | tx_set_ic = 1; |
752 | else if ((ring->coalesce_count % pdata->tx_frames) < |
753 | pkt_info->tx_packets) |
754 | tx_set_ic = 1; |
755 | else |
756 | tx_set_ic = 0; |
757 | |
758 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); |
759 | dma_desc = desc_data->dma_desc; |
760 | |
761 | /* Create a context descriptor if this is a TSO pkt_info */ |
762 | if (tso_context || vlan_context) { |
763 | if (tso_context) { |
764 | netif_dbg(pdata, tx_queued, pdata->netdev, |
765 | "TSO context descriptor, mss=%u\n" , |
766 | pkt_info->mss); |
767 | |
768 | /* Set the MSS size */ |
769 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
770 | dma_desc->desc2, |
771 | TX_CONTEXT_DESC2_MSS_POS, |
772 | TX_CONTEXT_DESC2_MSS_LEN, |
773 | pkt_info->mss); |
774 | |
775 | /* Mark it as a CONTEXT descriptor */ |
776 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
777 | dma_desc->desc3, |
778 | TX_CONTEXT_DESC3_CTXT_POS, |
779 | TX_CONTEXT_DESC3_CTXT_LEN, |
780 | 1); |
781 | |
782 | /* Indicate this descriptor contains the MSS */ |
783 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
784 | dma_desc->desc3, |
785 | TX_CONTEXT_DESC3_TCMSSV_POS, |
786 | TX_CONTEXT_DESC3_TCMSSV_LEN, |
787 | 1); |
788 | |
789 | ring->tx.cur_mss = pkt_info->mss; |
790 | } |
791 | |
792 | if (vlan_context) { |
793 | netif_dbg(pdata, tx_queued, pdata->netdev, |
794 | "VLAN context descriptor, ctag=%u\n" , |
795 | pkt_info->vlan_ctag); |
796 | |
797 | /* Mark it as a CONTEXT descriptor */ |
798 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
799 | dma_desc->desc3, |
800 | TX_CONTEXT_DESC3_CTXT_POS, |
801 | TX_CONTEXT_DESC3_CTXT_LEN, |
802 | 1); |
803 | |
804 | /* Set the VLAN tag */ |
805 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
806 | dma_desc->desc3, |
807 | TX_CONTEXT_DESC3_VT_POS, |
808 | TX_CONTEXT_DESC3_VT_LEN, |
809 | pkt_info->vlan_ctag); |
810 | |
811 | /* Indicate this descriptor contains the VLAN tag */ |
812 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
813 | dma_desc->desc3, |
814 | TX_CONTEXT_DESC3_VLTV_POS, |
815 | TX_CONTEXT_DESC3_VLTV_LEN, |
816 | 1); |
817 | |
818 | ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; |
819 | } |
820 | |
821 | cur_index++; |
822 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); |
823 | dma_desc = desc_data->dma_desc; |
824 | } |
825 | |
826 | /* Update buffer address (for TSO this is the header) */ |
827 | dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); |
828 | dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); |
829 | |
830 | /* Update the buffer length */ |
831 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
832 | dma_desc->desc2, |
833 | TX_NORMAL_DESC2_HL_B1L_POS, |
834 | TX_NORMAL_DESC2_HL_B1L_LEN, |
835 | desc_data->skb_dma_len); |
836 | |
837 | /* VLAN tag insertion check */ |
838 | if (vlan) { |
839 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
840 | dma_desc->desc2, |
841 | TX_NORMAL_DESC2_VTIR_POS, |
842 | TX_NORMAL_DESC2_VTIR_LEN, |
843 | TX_NORMAL_DESC2_VLAN_INSERT); |
844 | pdata->stats.tx_vlan_packets++; |
845 | } |
846 | |
847 | /* Timestamp enablement check */ |
848 | if (XLGMAC_GET_REG_BITS(pkt_info->attributes, |
849 | TX_PACKET_ATTRIBUTES_PTP_POS, |
850 | TX_PACKET_ATTRIBUTES_PTP_LEN)) |
851 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
852 | dma_desc->desc2, |
853 | TX_NORMAL_DESC2_TTSE_POS, |
854 | TX_NORMAL_DESC2_TTSE_LEN, |
855 | 1); |
856 | |
857 | /* Mark it as First Descriptor */ |
858 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
859 | dma_desc->desc3, |
860 | TX_NORMAL_DESC3_FD_POS, |
861 | TX_NORMAL_DESC3_FD_LEN, |
862 | 1); |
863 | |
864 | /* Mark it as a NORMAL descriptor */ |
865 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
866 | dma_desc->desc3, |
867 | TX_NORMAL_DESC3_CTXT_POS, |
868 | TX_NORMAL_DESC3_CTXT_LEN, |
869 | 0); |
870 | |
871 | /* Set OWN bit if not the first descriptor */ |
872 | if (cur_index != start_index) |
873 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
874 | dma_desc->desc3, |
875 | TX_NORMAL_DESC3_OWN_POS, |
876 | TX_NORMAL_DESC3_OWN_LEN, |
877 | 1); |
878 | |
879 | if (tso) { |
880 | /* Enable TSO */ |
881 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
882 | dma_desc->desc3, |
883 | TX_NORMAL_DESC3_TSE_POS, |
884 | TX_NORMAL_DESC3_TSE_LEN, 1); |
885 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
886 | dma_desc->desc3, |
887 | TX_NORMAL_DESC3_TCPPL_POS, |
888 | TX_NORMAL_DESC3_TCPPL_LEN, |
889 | pkt_info->tcp_payload_len); |
890 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
891 | dma_desc->desc3, |
892 | TX_NORMAL_DESC3_TCPHDRLEN_POS, |
893 | TX_NORMAL_DESC3_TCPHDRLEN_LEN, |
894 | pkt_info->tcp_header_len / 4); |
895 | |
896 | pdata->stats.tx_tso_packets++; |
897 | } else { |
898 | /* Enable CRC and Pad Insertion */ |
899 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
900 | dma_desc->desc3, |
901 | TX_NORMAL_DESC3_CPC_POS, |
902 | TX_NORMAL_DESC3_CPC_LEN, 0); |
903 | |
904 | /* Enable HW CSUM */ |
905 | if (csum) |
906 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
907 | dma_desc->desc3, |
908 | TX_NORMAL_DESC3_CIC_POS, |
909 | TX_NORMAL_DESC3_CIC_LEN, |
910 | 0x3); |
911 | |
912 | /* Set the total length to be transmitted */ |
913 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
914 | dma_desc->desc3, |
915 | TX_NORMAL_DESC3_FL_POS, |
916 | TX_NORMAL_DESC3_FL_LEN, |
917 | pkt_info->length); |
918 | } |
919 | |
920 | for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { |
921 | cur_index++; |
922 | desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); |
923 | dma_desc = desc_data->dma_desc; |
924 | |
925 | /* Update buffer address */ |
926 | dma_desc->desc0 = |
927 | cpu_to_le32(lower_32_bits(desc_data->skb_dma)); |
928 | dma_desc->desc1 = |
929 | cpu_to_le32(upper_32_bits(desc_data->skb_dma)); |
930 | |
931 | /* Update the buffer length */ |
932 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
933 | dma_desc->desc2, |
934 | TX_NORMAL_DESC2_HL_B1L_POS, |
935 | TX_NORMAL_DESC2_HL_B1L_LEN, |
936 | desc_data->skb_dma_len); |
937 | |
938 | /* Set OWN bit */ |
939 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
940 | dma_desc->desc3, |
941 | TX_NORMAL_DESC3_OWN_POS, |
942 | TX_NORMAL_DESC3_OWN_LEN, 1); |
943 | |
944 | /* Mark it as NORMAL descriptor */ |
945 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
946 | dma_desc->desc3, |
947 | TX_NORMAL_DESC3_CTXT_POS, |
948 | TX_NORMAL_DESC3_CTXT_LEN, 0); |
949 | |
950 | /* Enable HW CSUM */ |
951 | if (csum) |
952 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
953 | dma_desc->desc3, |
954 | TX_NORMAL_DESC3_CIC_POS, |
955 | TX_NORMAL_DESC3_CIC_LEN, |
956 | 0x3); |
957 | } |
958 | |
959 | /* Set LAST bit for the last descriptor */ |
960 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
961 | dma_desc->desc3, |
962 | TX_NORMAL_DESC3_LD_POS, |
963 | TX_NORMAL_DESC3_LD_LEN, 1); |
964 | |
965 | /* Set IC bit based on Tx coalescing settings */ |
966 | if (tx_set_ic) |
967 | dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( |
968 | dma_desc->desc2, |
969 | TX_NORMAL_DESC2_IC_POS, |
970 | TX_NORMAL_DESC2_IC_LEN, 1); |
971 | |
972 | /* Save the Tx info to report back during cleanup */ |
973 | desc_data->tx.packets = pkt_info->tx_packets; |
974 | desc_data->tx.bytes = pkt_info->tx_bytes; |
975 | |
976 | /* In case the Tx DMA engine is running, make sure everything |
977 | * is written to the descriptor(s) before setting the OWN bit |
978 | * for the first descriptor |
979 | */ |
980 | dma_wmb(); |
981 | |
982 | /* Set OWN bit for the first descriptor */ |
983 | desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); |
984 | dma_desc = desc_data->dma_desc; |
985 | dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( |
986 | dma_desc->desc3, |
987 | TX_NORMAL_DESC3_OWN_POS, |
988 | TX_NORMAL_DESC3_OWN_LEN, 1); |
989 | |
990 | if (netif_msg_tx_queued(pdata)) |
991 | xlgmac_dump_tx_desc(pdata, ring, idx: start_index, |
992 | count: pkt_info->desc_count, flag: 1); |
993 | |
994 | /* Make sure ownership is written to the descriptor */ |
995 | smp_wmb(); |
996 | |
997 | ring->cur = cur_index + 1; |
998 | if (!netdev_xmit_more() || |
999 | netif_xmit_stopped(dev_queue: netdev_get_tx_queue(dev: pdata->netdev, |
1000 | index: channel->queue_index))) |
1001 | xlgmac_tx_start_xmit(channel, ring); |
1002 | else |
1003 | ring->tx.xmit_more = 1; |
1004 | |
1005 | XLGMAC_PR("%s: descriptors %u to %u written\n" , |
1006 | channel->name, start_index & (ring->dma_desc_count - 1), |
1007 | (ring->cur - 1) & (ring->dma_desc_count - 1)); |
1008 | } |
1009 | |
/* Extract the hardware Rx timestamp from a context descriptor into
 * pkt_info.  The timestamp is only taken when the descriptor flags it
 * as available (TSA set) and not dropped (TSD clear).
 */
static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
				 struct xlgmac_dma_desc *dma_desc)
{
	u32 tsa, tsd;
	u64 nsec;

	tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				     RX_CONTEXT_DESC3_TSA_POS,
				RX_CONTEXT_DESC3_TSA_LEN);
	tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				     RX_CONTEXT_DESC3_TSD_POS,
				RX_CONTEXT_DESC3_TSD_LEN);
	if (tsa && !tsd) {
		/* 64-bit timestamp: desc1 holds the high word, desc0 the low */
		nsec = le32_to_cpu(dma_desc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(dma_desc->desc0);
		/* All-ones is the hardware's "invalid timestamp" marker */
		if (nsec != 0xffffffffffffffffULL) {
			pkt_info->rx_tstamp = nsec;
			/* Tell the Rx path a valid timestamp is attached */
			pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
					RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
					1);
		}
	}
}
1036 | |
1037 | static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data) |
1038 | { |
1039 | struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc; |
1040 | |
1041 | /* Reset the Tx descriptor |
1042 | * Set buffer 1 (lo) address to zero |
1043 | * Set buffer 1 (hi) address to zero |
1044 | * Reset all other control bits (IC, TTSE, B2L & B1L) |
1045 | * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) |
1046 | */ |
1047 | dma_desc->desc0 = 0; |
1048 | dma_desc->desc1 = 0; |
1049 | dma_desc->desc2 = 0; |
1050 | dma_desc->desc3 = 0; |
1051 | |
1052 | /* Make sure ownership is written to the descriptor */ |
1053 | dma_wmb(); |
1054 | } |
1055 | |
/* Reset every Tx descriptor in the ring and program the channel's
 * Tx descriptor ring length and base address registers.
 */
static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_ring *ring = channel->tx_ring;
	struct xlgmac_desc_data *desc_data;
	int start_index = ring->cur;
	int i;

	/* Initialize all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xlgmac_tx_desc_reset(desc_data);
	}

	/* Update the total number of Tx descriptors */
	writel(val: ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
}
1081 | |
/* Re-arm an Rx descriptor: write the header/buffer DMA addresses,
 * choose whether this descriptor raises an interrupt on completion,
 * and hand ownership back to the hardware.
 *
 * The OWN bit is written last, after a DMA write barrier, because the
 * Rx DMA engine may already be running and must not see a partially
 * initialized descriptor.
 */
static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
				 struct xlgmac_desc_data *desc_data,
				 unsigned int index)
{
	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int rx_usecs = pdata->rx_usecs;
	dma_addr_t hdr_dma, buf_dma;
	unsigned int inte;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 * Set buffer 1 (lo) address to header dma address (lo)
	 * Set buffer 1 (hi) address to header dma address (hi)
	 * Set buffer 2 (lo) address to buffer dma address (lo)
	 * Set buffer 2 (hi) address to buffer dma address (hi) and
	 * set control bits OWN and INTE
	 */
	hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
	buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
	dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_INTE_POS,
				RX_NORMAL_DESC3_INTE_LEN,
				inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
				dma_desc->desc3,
				RX_NORMAL_DESC3_OWN_POS,
				RX_NORMAL_DESC3_OWN_LEN,
				1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
1138 | |
/* Re-arm every Rx descriptor in the ring and program the channel's Rx
 * ring length, base address, and tail pointer registers.  Writing the
 * tail pointer to the last descriptor tells the DMA engine the whole
 * ring is available.
 */
static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	unsigned int start_index = ring->cur;
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	/* Initialize all descriptors */
	for (i = 0; i < ring->dma_desc_count; i++) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xlgmac_rx_desc_reset(pdata, desc_data, index: i);
	}

	/* Update the total number of Rx descriptors */
	writel(val: ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));

	/* Update the Rx Descriptor Tail Pointer */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
					ring->dma_desc_count - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}
1171 | |
1172 | static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc) |
1173 | { |
1174 | /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ |
1175 | return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
1176 | TX_NORMAL_DESC3_CTXT_POS, |
1177 | TX_NORMAL_DESC3_CTXT_LEN); |
1178 | } |
1179 | |
1180 | static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc) |
1181 | { |
1182 | /* Rx and Tx share LD bit, so check TDES3.LD bit */ |
1183 | return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
1184 | TX_NORMAL_DESC3_LD_POS, |
1185 | TX_NORMAL_DESC3_LD_LEN); |
1186 | } |
1187 | |
/* Turn off Tx (pause-frame) flow control: clear per-Rx-queue hardware
 * flow control in the MTL, then clear the transmit-flow-control enable
 * bit in each per-queue MAC Q0TFCR register.  Always returns 0.
 */
static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, regval;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
					     MTL_Q_RQOMR_EHFC_LEN, 0);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	/* Clear MAC flow control */
	max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		regval = readl(addr: pdata->mac_regs + reg);
		regval = XLGMAC_SET_REG_BITS(regval,
					     MAC_Q0TFCR_TFE_POS,
					MAC_Q0TFCR_TFE_LEN,
					0);
		writel(val: regval, addr: pdata->mac_regs + reg);

		/* Queue flow control registers are laid out at a fixed stride */
		reg += MAC_QTFCR_INC;
	}

	return 0;
}
1219 | |
/* Turn on Tx (pause-frame) flow control: enable per-Rx-queue hardware
 * flow control in the MTL, then enable transmit flow control with the
 * maximum pause time (0xffff) in each per-queue MAC Q0TFCR register.
 * Always returns 0.
 */
static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, regval;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
					     MTL_Q_RQOMR_EHFC_LEN, 1);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	/* Set MAC flow control */
	max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		regval = readl(addr: pdata->mac_regs + reg);

		/* Enable transmit flow control */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
					     MAC_Q0TFCR_TFE_LEN, 1);
		/* Set pause time */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
					     MAC_Q0TFCR_PT_LEN, 0xffff);

		writel(val: regval, addr: pdata->mac_regs + reg);

		/* Queue flow control registers are laid out at a fixed stride */
		reg += MAC_QTFCR_INC;
	}

	return 0;
}
1255 | |
1256 | static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata) |
1257 | { |
1258 | u32 regval; |
1259 | |
1260 | regval = readl(addr: pdata->mac_regs + MAC_RFCR); |
1261 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, |
1262 | MAC_RFCR_RFE_LEN, 0); |
1263 | writel(val: regval, addr: pdata->mac_regs + MAC_RFCR); |
1264 | |
1265 | return 0; |
1266 | } |
1267 | |
1268 | static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata) |
1269 | { |
1270 | u32 regval; |
1271 | |
1272 | regval = readl(addr: pdata->mac_regs + MAC_RFCR); |
1273 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, |
1274 | MAC_RFCR_RFE_LEN, 1); |
1275 | writel(val: regval, addr: pdata->mac_regs + MAC_RFCR); |
1276 | |
1277 | return 0; |
1278 | } |
1279 | |
1280 | static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata) |
1281 | { |
1282 | if (pdata->tx_pause) |
1283 | xlgmac_enable_tx_flow_control(pdata); |
1284 | else |
1285 | xlgmac_disable_tx_flow_control(pdata); |
1286 | |
1287 | return 0; |
1288 | } |
1289 | |
1290 | static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata) |
1291 | { |
1292 | if (pdata->rx_pause) |
1293 | xlgmac_enable_rx_flow_control(pdata); |
1294 | else |
1295 | xlgmac_disable_rx_flow_control(pdata); |
1296 | |
1297 | return 0; |
1298 | } |
1299 | |
/* Program the Rx interrupt watchdog timer (pdata->rx_riwt) into every
 * DMA channel that has an Rx ring.  Always returns 0.
 */
static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without an Rx ring end the Rx-capable span */
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
					     DMA_CH_RIWT_RWT_LEN,
					     pdata->rx_riwt);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
	}

	return 0;
}
1320 | |
/* Push both directions of pause/flow-control configuration from
 * pdata into the hardware.
 */
static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
{
	(void)xlgmac_config_tx_flow_control(pdata);
	(void)xlgmac_config_rx_flow_control(pdata);
}
1326 | |
/* Set FEP (Forward Error Packets) in every MTL Rx queue so errored
 * frames are forwarded to the application instead of being dropped.
 */
static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
					     MTL_Q_RQOMR_FEP_LEN, 1);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}
1339 | |
/* Set FUP (Forward Undersized Packets) in every MTL Rx queue so
 * undersized good frames are forwarded instead of being dropped.
 */
static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
					     MTL_Q_RQOMR_FUP_LEN, 1);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}
1352 | |
/* Tx interrupt coalescing needs no register programming here; the
 * descriptor IC bit is set per-packet in the transmit path instead.
 * Kept as a hook so Tx and Rx coalesce configuration stay symmetric.
 */
static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
{
	return 0;
}
1357 | |
/* Program the Rx DMA buffer size (pdata->rx_buf_size) into every
 * channel that has an Rx ring.
 */
static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without an Rx ring end the Rx-capable span */
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
					     DMA_CH_RCR_RBSZ_LEN,
					pdata->rx_buf_size);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}
}
1376 | |
/* Enable TCP Segmentation Offload on every Tx-capable channel, but
 * only if the hardware feature register reported TSO support.
 */
static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without a Tx ring end the Tx-capable span */
		if (!channel->tx_ring)
			break;

		if (pdata->hw_feat.tso) {
			regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
			regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
						     DMA_CH_TCR_TSE_LEN, 1);
			writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		}
	}
}
1396 | |
/* Enable Split Header (SPH) mode on every Rx-capable channel and set
 * the MAC's maximum header split size (HDSMS) to XLGMAC_SPH_HDSMS_SIZE.
 */
static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without an Rx ring end the Rx-capable span */
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
					     DMA_CH_CR_SPH_LEN, 1);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	/* Header split size is a MAC-wide setting */
	regval = readl(addr: pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
				     MAC_RCR_HDSMS_LEN,
				XLGMAC_SPH_HDSMS_SIZE);
	writel(val: regval, addr: pdata->mac_regs + MAC_RCR);
}
1420 | |
1421 | static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata, |
1422 | unsigned int usec) |
1423 | { |
1424 | unsigned long rate; |
1425 | unsigned int ret; |
1426 | |
1427 | rate = pdata->sysclk_rate; |
1428 | |
1429 | /* Convert the input usec value to the watchdog timer value. Each |
1430 | * watchdog timer value is equivalent to 256 clock cycles. |
1431 | * Calculate the required value as: |
1432 | * ( usec * ( system_clock_mhz / 10^6 ) / 256 |
1433 | */ |
1434 | ret = (usec * (rate / 1000000)) / 256; |
1435 | |
1436 | return ret; |
1437 | } |
1438 | |
1439 | static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata, |
1440 | unsigned int riwt) |
1441 | { |
1442 | unsigned long rate; |
1443 | unsigned int ret; |
1444 | |
1445 | rate = pdata->sysclk_rate; |
1446 | |
1447 | /* Convert the input watchdog timer value to the usec value. Each |
1448 | * watchdog timer value is equivalent to 256 clock cycles. |
1449 | * Calculate the required value as: |
1450 | * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) |
1451 | */ |
1452 | ret = (riwt * 256) / (rate / 1000000); |
1453 | |
1454 | return ret; |
1455 | } |
1456 | |
/* Program the Rx Threshold Control (RTC) value into every MTL Rx
 * queue.  'val' is the raw register encoding.  Always returns 0.
 */
static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
				      unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
					     MTL_Q_RQOMR_RTC_LEN, val);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	return 0;
}
1472 | |
/* Configure the MTL arbitration algorithms: weighted round robin for
 * Tx (with each traffic class given ETS scheduling and equal weight 1)
 * and strict priority for Rx.
 */
static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	/* Set Tx to weighted round robin scheduling algorithm */
	regval = readl(addr: pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
				     MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
	writel(val: regval, addr: pdata->mac_regs + MTL_OMR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
					     MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));

		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
					     MTL_TC_QWR_QW_LEN, 1);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
	}

	/* Set Rx to strict priority algorithm */
	regval = readl(addr: pdata->mac_regs + MTL_OMR);
	regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
				     MTL_OMR_RAA_LEN, MTL_RAA_SP);
	writel(val: regval, addr: pdata->mac_regs + MTL_OMR);
}
1503 | |
1504 | static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata) |
1505 | { |
1506 | unsigned int ppq, , prio, prio_queues; |
1507 | unsigned int qptc, , queue; |
1508 | unsigned int reg, regval; |
1509 | unsigned int mask; |
1510 | unsigned int i, j; |
1511 | |
1512 | /* Map the MTL Tx Queues to Traffic Classes |
1513 | * Note: Tx Queues >= Traffic Classes |
1514 | */ |
1515 | qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; |
1516 | qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; |
1517 | |
1518 | for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { |
1519 | for (j = 0; j < qptc; j++) { |
1520 | netif_dbg(pdata, drv, pdata->netdev, |
1521 | "TXq%u mapped to TC%u\n" , queue, i); |
1522 | regval = readl(XLGMAC_MTL_REG(pdata, queue, |
1523 | MTL_Q_TQOMR)); |
1524 | regval = XLGMAC_SET_REG_BITS(regval, |
1525 | MTL_Q_TQOMR_Q2TCMAP_POS, |
1526 | MTL_Q_TQOMR_Q2TCMAP_LEN, |
1527 | i); |
1528 | writel(val: regval, XLGMAC_MTL_REG(pdata, queue, |
1529 | MTL_Q_TQOMR)); |
1530 | queue++; |
1531 | } |
1532 | |
1533 | if (i < qptc_extra) { |
1534 | netif_dbg(pdata, drv, pdata->netdev, |
1535 | "TXq%u mapped to TC%u\n" , queue, i); |
1536 | regval = readl(XLGMAC_MTL_REG(pdata, queue, |
1537 | MTL_Q_TQOMR)); |
1538 | regval = XLGMAC_SET_REG_BITS(regval, |
1539 | MTL_Q_TQOMR_Q2TCMAP_POS, |
1540 | MTL_Q_TQOMR_Q2TCMAP_LEN, |
1541 | i); |
1542 | writel(val: regval, XLGMAC_MTL_REG(pdata, queue, |
1543 | MTL_Q_TQOMR)); |
1544 | queue++; |
1545 | } |
1546 | } |
1547 | |
1548 | /* Map the 8 VLAN priority values to available MTL Rx queues */ |
1549 | prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, |
1550 | pdata->rx_q_count); |
1551 | ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; |
1552 | ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; |
1553 | |
1554 | reg = MAC_RQC2R; |
1555 | regval = 0; |
1556 | for (i = 0, prio = 0; i < prio_queues;) { |
1557 | mask = 0; |
1558 | for (j = 0; j < ppq; j++) { |
1559 | netif_dbg(pdata, drv, pdata->netdev, |
1560 | "PRIO%u mapped to RXq%u\n" , prio, i); |
1561 | mask |= (1 << prio); |
1562 | prio++; |
1563 | } |
1564 | |
1565 | if (i < ppq_extra) { |
1566 | netif_dbg(pdata, drv, pdata->netdev, |
1567 | "PRIO%u mapped to RXq%u\n" , prio, i); |
1568 | mask |= (1 << prio); |
1569 | prio++; |
1570 | } |
1571 | |
1572 | regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); |
1573 | |
1574 | if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) |
1575 | continue; |
1576 | |
1577 | writel(val: regval, addr: pdata->mac_regs + reg); |
1578 | reg += MAC_RQC2_INC; |
1579 | regval = 0; |
1580 | } |
1581 | |
1582 | /* Configure one to one, MTL Rx queue to DMA Rx channel mapping |
1583 | * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11 |
1584 | */ |
1585 | reg = MTL_RQDCM0R; |
1586 | regval = readl(addr: pdata->mac_regs + reg); |
1587 | regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH | |
1588 | MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH); |
1589 | writel(val: regval, addr: pdata->mac_regs + reg); |
1590 | |
1591 | reg += MTL_RQDCM_INC; |
1592 | regval = readl(addr: pdata->mac_regs + reg); |
1593 | regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH | |
1594 | MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH); |
1595 | writel(val: regval, addr: pdata->mac_regs + reg); |
1596 | |
1597 | reg += MTL_RQDCM_INC; |
1598 | regval = readl(addr: pdata->mac_regs + reg); |
1599 | regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH | |
1600 | MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH); |
1601 | writel(val: regval, addr: pdata->mac_regs + reg); |
1602 | } |
1603 | |
1604 | static unsigned int xlgmac_calculate_per_queue_fifo( |
1605 | unsigned int fifo_size, |
1606 | unsigned int queue_count) |
1607 | { |
1608 | unsigned int q_fifo_size; |
1609 | unsigned int p_fifo; |
1610 | |
1611 | /* Calculate the configured fifo size */ |
1612 | q_fifo_size = 1 << (fifo_size + 7); |
1613 | |
1614 | /* The configured value may not be the actual amount of fifo RAM */ |
1615 | q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); |
1616 | |
1617 | q_fifo_size = q_fifo_size / queue_count; |
1618 | |
1619 | /* Each increment in the queue fifo size represents 256 bytes of |
1620 | * fifo, with 0 representing 256 bytes. Distribute the fifo equally |
1621 | * between the queues. |
1622 | */ |
1623 | p_fifo = q_fifo_size / 256; |
1624 | if (p_fifo) |
1625 | p_fifo--; |
1626 | |
1627 | return p_fifo; |
1628 | } |
1629 | |
/* Split the Tx fifo RAM evenly across the Tx queues and program the
 * resulting per-queue size (TQS) into each MTL Tx queue.
 */
static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				fifo_size: pdata->hw_feat.tx_fifo_size,
				queue_count: pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
					     MTL_Q_TQOMR_TQS_LEN, fifo_size);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	/* Register encoding is 0-based in 256-byte units, hence (val+1)*256 */
	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}
1651 | |
/* Split the Rx fifo RAM evenly across the Rx queues and program the
 * resulting per-queue size (RQS) into each MTL Rx queue.
 */
static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				fifo_size: pdata->hw_feat.rx_fifo_size,
				queue_count: pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
					     MTL_Q_RQOMR_RQS_LEN, fifo_size);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	/* Register encoding is 0-based in 256-byte units, hence (val+1)*256 */
	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
1673 | |
/* Program the per-queue fifo fill levels at which flow control is
 * asserted (RFA) and de-asserted (RFD).
 */
static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
		/* Activate flow control when less than 4k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
					     MTL_Q_RQFCR_RFA_LEN, 2);
		/* De-activate flow control when more than 6k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
					     MTL_Q_RQFCR_RFD_LEN, 4);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
	}
}
1690 | |
/* Program the Tx Threshold Control (TTC) value into every MTL Tx
 * queue.  'val' is the raw register encoding.  Always returns 0.
 */
static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
				      unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
					     MTL_Q_TQOMR_TTC_LEN, val);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}
1706 | |
/* Set Receive Store-and-Forward (RSF) mode on or off for every MTL Rx
 * queue.  Always returns 0.
 */
static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
					     MTL_Q_RQOMR_RSF_LEN, val);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	return 0;
}
1722 | |
/* Set Transmit Store-and-Forward (TSF) mode on or off for every MTL Tx
 * queue.  Always returns 0.
 */
static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
					     MTL_Q_TQOMR_TSF_LEN, val);
		writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}
1738 | |
/* Program the Operate-on-Second-Packet (OSP) mode from
 * pdata->tx_osp_mode into every Tx-capable channel.  Always returns 0.
 */
static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without a Tx ring end the Tx-capable span */
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
					     DMA_CH_TCR_OSP_LEN,
					pdata->tx_osp_mode);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	return 0;
}
1759 | |
/* Program the PBLx8 setting (multiply burst length by 8) from
 * pdata->pblx8 into every channel.  Always returns 0.
 */
static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
					     DMA_CH_CR_PBLX8_LEN,
					pdata->pblx8);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	return 0;
}
1777 | |
1778 | static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata) |
1779 | { |
1780 | u32 regval; |
1781 | |
1782 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); |
1783 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, |
1784 | DMA_CH_TCR_PBL_LEN); |
1785 | return regval; |
1786 | } |
1787 | |
/* Program the Tx programmable burst length (pdata->tx_pbl) into every
 * Tx-capable channel.  Always returns 0.
 */
static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without a Tx ring end the Tx-capable span */
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
					     DMA_CH_TCR_PBL_LEN,
					pdata->tx_pbl);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	return 0;
}
1808 | |
1809 | static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) |
1810 | { |
1811 | u32 regval; |
1812 | |
1813 | regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); |
1814 | regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, |
1815 | DMA_CH_RCR_PBL_LEN); |
1816 | return regval; |
1817 | } |
1818 | |
/* Program the Rx programmable burst length (pdata->rx_pbl) into every
 * Rx-capable channel.  Always returns 0.
 */
static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Channels without an Rx ring end the Rx-capable span */
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
					     DMA_CH_RCR_PBL_LEN,
					pdata->rx_pbl);
		writel(val: regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}

	return 0;
}
1839 | |
/* Read an MMC statistics counter.  Most counters are 32 bits wide;
 * the octet counters listed below are 64 bits, so their high word is
 * read from the register at reg_lo + 4 and merged in.
 */
static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	}

	val = (u64)readl(addr: pdata->mac_regs + reg_lo);

	if (read_hi)
		val |= ((u64)readl(addr: pdata->mac_regs + reg_lo + 4) << 32);

	return val;
}
1865 | |
1866 | static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata) |
1867 | { |
1868 | unsigned int mmc_isr = readl(addr: pdata->mac_regs + MMC_TISR); |
1869 | struct xlgmac_stats *stats = &pdata->stats; |
1870 | |
1871 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1872 | MMC_TISR_TXOCTETCOUNT_GB_POS, |
1873 | MMC_TISR_TXOCTETCOUNT_GB_LEN)) |
1874 | stats->txoctetcount_gb += |
1875 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
1876 | |
1877 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1878 | MMC_TISR_TXFRAMECOUNT_GB_POS, |
1879 | MMC_TISR_TXFRAMECOUNT_GB_LEN)) |
1880 | stats->txframecount_gb += |
1881 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
1882 | |
1883 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1884 | MMC_TISR_TXBROADCASTFRAMES_G_POS, |
1885 | MMC_TISR_TXBROADCASTFRAMES_G_LEN)) |
1886 | stats->txbroadcastframes_g += |
1887 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
1888 | |
1889 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1890 | MMC_TISR_TXMULTICASTFRAMES_G_POS, |
1891 | MMC_TISR_TXMULTICASTFRAMES_G_LEN)) |
1892 | stats->txmulticastframes_g += |
1893 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
1894 | |
1895 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1896 | MMC_TISR_TX64OCTETS_GB_POS, |
1897 | MMC_TISR_TX64OCTETS_GB_LEN)) |
1898 | stats->tx64octets_gb += |
1899 | xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
1900 | |
1901 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1902 | MMC_TISR_TX65TO127OCTETS_GB_POS, |
1903 | MMC_TISR_TX65TO127OCTETS_GB_LEN)) |
1904 | stats->tx65to127octets_gb += |
1905 | xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
1906 | |
1907 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1908 | MMC_TISR_TX128TO255OCTETS_GB_POS, |
1909 | MMC_TISR_TX128TO255OCTETS_GB_LEN)) |
1910 | stats->tx128to255octets_gb += |
1911 | xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
1912 | |
1913 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1914 | MMC_TISR_TX256TO511OCTETS_GB_POS, |
1915 | MMC_TISR_TX256TO511OCTETS_GB_LEN)) |
1916 | stats->tx256to511octets_gb += |
1917 | xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
1918 | |
1919 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1920 | MMC_TISR_TX512TO1023OCTETS_GB_POS, |
1921 | MMC_TISR_TX512TO1023OCTETS_GB_LEN)) |
1922 | stats->tx512to1023octets_gb += |
1923 | xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
1924 | |
1925 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1926 | MMC_TISR_TX1024TOMAXOCTETS_GB_POS, |
1927 | MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) |
1928 | stats->tx1024tomaxoctets_gb += |
1929 | xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
1930 | |
1931 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1932 | MMC_TISR_TXUNICASTFRAMES_GB_POS, |
1933 | MMC_TISR_TXUNICASTFRAMES_GB_LEN)) |
1934 | stats->txunicastframes_gb += |
1935 | xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
1936 | |
1937 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1938 | MMC_TISR_TXMULTICASTFRAMES_GB_POS, |
1939 | MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) |
1940 | stats->txmulticastframes_gb += |
1941 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
1942 | |
1943 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1944 | MMC_TISR_TXBROADCASTFRAMES_GB_POS, |
1945 | MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) |
1946 | stats->txbroadcastframes_g += |
1947 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
1948 | |
1949 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1950 | MMC_TISR_TXUNDERFLOWERROR_POS, |
1951 | MMC_TISR_TXUNDERFLOWERROR_LEN)) |
1952 | stats->txunderflowerror += |
1953 | xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
1954 | |
1955 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1956 | MMC_TISR_TXOCTETCOUNT_G_POS, |
1957 | MMC_TISR_TXOCTETCOUNT_G_LEN)) |
1958 | stats->txoctetcount_g += |
1959 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
1960 | |
1961 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1962 | MMC_TISR_TXFRAMECOUNT_G_POS, |
1963 | MMC_TISR_TXFRAMECOUNT_G_LEN)) |
1964 | stats->txframecount_g += |
1965 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
1966 | |
1967 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1968 | MMC_TISR_TXPAUSEFRAMES_POS, |
1969 | MMC_TISR_TXPAUSEFRAMES_LEN)) |
1970 | stats->txpauseframes += |
1971 | xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
1972 | |
1973 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1974 | MMC_TISR_TXVLANFRAMES_G_POS, |
1975 | MMC_TISR_TXVLANFRAMES_G_LEN)) |
1976 | stats->txvlanframes_g += |
1977 | xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
1978 | } |
1979 | |
1980 | static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata) |
1981 | { |
1982 | unsigned int mmc_isr = readl(addr: pdata->mac_regs + MMC_RISR); |
1983 | struct xlgmac_stats *stats = &pdata->stats; |
1984 | |
1985 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1986 | MMC_RISR_RXFRAMECOUNT_GB_POS, |
1987 | MMC_RISR_RXFRAMECOUNT_GB_LEN)) |
1988 | stats->rxframecount_gb += |
1989 | xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
1990 | |
1991 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1992 | MMC_RISR_RXOCTETCOUNT_GB_POS, |
1993 | MMC_RISR_RXOCTETCOUNT_GB_LEN)) |
1994 | stats->rxoctetcount_gb += |
1995 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
1996 | |
1997 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
1998 | MMC_RISR_RXOCTETCOUNT_G_POS, |
1999 | MMC_RISR_RXOCTETCOUNT_G_LEN)) |
2000 | stats->rxoctetcount_g += |
2001 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2002 | |
2003 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2004 | MMC_RISR_RXBROADCASTFRAMES_G_POS, |
2005 | MMC_RISR_RXBROADCASTFRAMES_G_LEN)) |
2006 | stats->rxbroadcastframes_g += |
2007 | xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2008 | |
2009 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2010 | MMC_RISR_RXMULTICASTFRAMES_G_POS, |
2011 | MMC_RISR_RXMULTICASTFRAMES_G_LEN)) |
2012 | stats->rxmulticastframes_g += |
2013 | xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2014 | |
2015 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2016 | MMC_RISR_RXCRCERROR_POS, |
2017 | MMC_RISR_RXCRCERROR_LEN)) |
2018 | stats->rxcrcerror += |
2019 | xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2020 | |
2021 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2022 | MMC_RISR_RXRUNTERROR_POS, |
2023 | MMC_RISR_RXRUNTERROR_LEN)) |
2024 | stats->rxrunterror += |
2025 | xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); |
2026 | |
2027 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2028 | MMC_RISR_RXJABBERERROR_POS, |
2029 | MMC_RISR_RXJABBERERROR_LEN)) |
2030 | stats->rxjabbererror += |
2031 | xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); |
2032 | |
2033 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2034 | MMC_RISR_RXUNDERSIZE_G_POS, |
2035 | MMC_RISR_RXUNDERSIZE_G_LEN)) |
2036 | stats->rxundersize_g += |
2037 | xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2038 | |
2039 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2040 | MMC_RISR_RXOVERSIZE_G_POS, |
2041 | MMC_RISR_RXOVERSIZE_G_LEN)) |
2042 | stats->rxoversize_g += |
2043 | xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2044 | |
2045 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2046 | MMC_RISR_RX64OCTETS_GB_POS, |
2047 | MMC_RISR_RX64OCTETS_GB_LEN)) |
2048 | stats->rx64octets_gb += |
2049 | xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2050 | |
2051 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2052 | MMC_RISR_RX65TO127OCTETS_GB_POS, |
2053 | MMC_RISR_RX65TO127OCTETS_GB_LEN)) |
2054 | stats->rx65to127octets_gb += |
2055 | xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2056 | |
2057 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2058 | MMC_RISR_RX128TO255OCTETS_GB_POS, |
2059 | MMC_RISR_RX128TO255OCTETS_GB_LEN)) |
2060 | stats->rx128to255octets_gb += |
2061 | xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2062 | |
2063 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2064 | MMC_RISR_RX256TO511OCTETS_GB_POS, |
2065 | MMC_RISR_RX256TO511OCTETS_GB_LEN)) |
2066 | stats->rx256to511octets_gb += |
2067 | xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2068 | |
2069 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2070 | MMC_RISR_RX512TO1023OCTETS_GB_POS, |
2071 | MMC_RISR_RX512TO1023OCTETS_GB_LEN)) |
2072 | stats->rx512to1023octets_gb += |
2073 | xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2074 | |
2075 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2076 | MMC_RISR_RX1024TOMAXOCTETS_GB_POS, |
2077 | MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) |
2078 | stats->rx1024tomaxoctets_gb += |
2079 | xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2080 | |
2081 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2082 | MMC_RISR_RXUNICASTFRAMES_G_POS, |
2083 | MMC_RISR_RXUNICASTFRAMES_G_LEN)) |
2084 | stats->rxunicastframes_g += |
2085 | xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2086 | |
2087 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2088 | MMC_RISR_RXLENGTHERROR_POS, |
2089 | MMC_RISR_RXLENGTHERROR_LEN)) |
2090 | stats->rxlengtherror += |
2091 | xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2092 | |
2093 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2094 | MMC_RISR_RXOUTOFRANGETYPE_POS, |
2095 | MMC_RISR_RXOUTOFRANGETYPE_LEN)) |
2096 | stats->rxoutofrangetype += |
2097 | xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2098 | |
2099 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2100 | MMC_RISR_RXPAUSEFRAMES_POS, |
2101 | MMC_RISR_RXPAUSEFRAMES_LEN)) |
2102 | stats->rxpauseframes += |
2103 | xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2104 | |
2105 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2106 | MMC_RISR_RXFIFOOVERFLOW_POS, |
2107 | MMC_RISR_RXFIFOOVERFLOW_LEN)) |
2108 | stats->rxfifooverflow += |
2109 | xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2110 | |
2111 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2112 | MMC_RISR_RXVLANFRAMES_GB_POS, |
2113 | MMC_RISR_RXVLANFRAMES_GB_LEN)) |
2114 | stats->rxvlanframes_gb += |
2115 | xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2116 | |
2117 | if (XLGMAC_GET_REG_BITS(mmc_isr, |
2118 | MMC_RISR_RXWATCHDOGERROR_POS, |
2119 | MMC_RISR_RXWATCHDOGERROR_LEN)) |
2120 | stats->rxwatchdogerror += |
2121 | xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2122 | } |
2123 | |
2124 | static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata) |
2125 | { |
2126 | struct xlgmac_stats *stats = &pdata->stats; |
2127 | u32 regval; |
2128 | |
2129 | /* Freeze counters */ |
2130 | regval = readl(addr: pdata->mac_regs + MMC_CR); |
2131 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, |
2132 | MMC_CR_MCF_LEN, 1); |
2133 | writel(val: regval, addr: pdata->mac_regs + MMC_CR); |
2134 | |
2135 | stats->txoctetcount_gb += |
2136 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
2137 | |
2138 | stats->txframecount_gb += |
2139 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
2140 | |
2141 | stats->txbroadcastframes_g += |
2142 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
2143 | |
2144 | stats->txmulticastframes_g += |
2145 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
2146 | |
2147 | stats->tx64octets_gb += |
2148 | xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
2149 | |
2150 | stats->tx65to127octets_gb += |
2151 | xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
2152 | |
2153 | stats->tx128to255octets_gb += |
2154 | xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
2155 | |
2156 | stats->tx256to511octets_gb += |
2157 | xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
2158 | |
2159 | stats->tx512to1023octets_gb += |
2160 | xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
2161 | |
2162 | stats->tx1024tomaxoctets_gb += |
2163 | xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
2164 | |
2165 | stats->txunicastframes_gb += |
2166 | xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
2167 | |
2168 | stats->txmulticastframes_gb += |
2169 | xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
2170 | |
2171 | stats->txbroadcastframes_g += |
2172 | xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
2173 | |
2174 | stats->txunderflowerror += |
2175 | xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
2176 | |
2177 | stats->txoctetcount_g += |
2178 | xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
2179 | |
2180 | stats->txframecount_g += |
2181 | xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
2182 | |
2183 | stats->txpauseframes += |
2184 | xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
2185 | |
2186 | stats->txvlanframes_g += |
2187 | xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
2188 | |
2189 | stats->rxframecount_gb += |
2190 | xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
2191 | |
2192 | stats->rxoctetcount_gb += |
2193 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
2194 | |
2195 | stats->rxoctetcount_g += |
2196 | xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2197 | |
2198 | stats->rxbroadcastframes_g += |
2199 | xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2200 | |
2201 | stats->rxmulticastframes_g += |
2202 | xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2203 | |
2204 | stats->rxcrcerror += |
2205 | xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2206 | |
2207 | stats->rxrunterror += |
2208 | xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); |
2209 | |
2210 | stats->rxjabbererror += |
2211 | xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); |
2212 | |
2213 | stats->rxundersize_g += |
2214 | xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2215 | |
2216 | stats->rxoversize_g += |
2217 | xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2218 | |
2219 | stats->rx64octets_gb += |
2220 | xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2221 | |
2222 | stats->rx65to127octets_gb += |
2223 | xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2224 | |
2225 | stats->rx128to255octets_gb += |
2226 | xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2227 | |
2228 | stats->rx256to511octets_gb += |
2229 | xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2230 | |
2231 | stats->rx512to1023octets_gb += |
2232 | xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2233 | |
2234 | stats->rx1024tomaxoctets_gb += |
2235 | xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2236 | |
2237 | stats->rxunicastframes_g += |
2238 | xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2239 | |
2240 | stats->rxlengtherror += |
2241 | xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2242 | |
2243 | stats->rxoutofrangetype += |
2244 | xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2245 | |
2246 | stats->rxpauseframes += |
2247 | xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2248 | |
2249 | stats->rxfifooverflow += |
2250 | xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2251 | |
2252 | stats->rxvlanframes_gb += |
2253 | xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2254 | |
2255 | stats->rxwatchdogerror += |
2256 | xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2257 | |
2258 | /* Un-freeze counters */ |
2259 | regval = readl(addr: pdata->mac_regs + MMC_CR); |
2260 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, |
2261 | MMC_CR_MCF_LEN, 0); |
2262 | writel(val: regval, addr: pdata->mac_regs + MMC_CR); |
2263 | } |
2264 | |
2265 | static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) |
2266 | { |
2267 | u32 regval; |
2268 | |
2269 | regval = readl(addr: pdata->mac_regs + MMC_CR); |
2270 | /* Set counters to reset on read */ |
2271 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, |
2272 | MMC_CR_ROR_LEN, 1); |
2273 | /* Reset the counters */ |
2274 | regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, |
2275 | MMC_CR_CR_LEN, 1); |
2276 | writel(val: regval, addr: pdata->mac_regs + MMC_CR); |
2277 | } |
2278 | |
2279 | static int (struct xlgmac_pdata *pdata, unsigned int type, |
2280 | unsigned int index, unsigned int val) |
2281 | { |
2282 | unsigned int wait; |
2283 | int ret = 0; |
2284 | u32 regval; |
2285 | |
2286 | mutex_lock(&pdata->rss_mutex); |
2287 | |
2288 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), |
2289 | MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); |
2290 | if (regval) { |
2291 | ret = -EBUSY; |
2292 | goto unlock; |
2293 | } |
2294 | |
2295 | writel(val, addr: pdata->mac_regs + MAC_RSSDR); |
2296 | |
2297 | regval = readl(addr: pdata->mac_regs + MAC_RSSAR); |
2298 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, |
2299 | MAC_RSSAR_RSSIA_LEN, index); |
2300 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, |
2301 | MAC_RSSAR_ADDRT_LEN, type); |
2302 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, |
2303 | MAC_RSSAR_CT_LEN, 0); |
2304 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, |
2305 | MAC_RSSAR_OB_LEN, 1); |
2306 | writel(val: regval, addr: pdata->mac_regs + MAC_RSSAR); |
2307 | |
2308 | wait = 1000; |
2309 | while (wait--) { |
2310 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), |
2311 | MAC_RSSAR_OB_POS, |
2312 | MAC_RSSAR_OB_LEN); |
2313 | if (!regval) |
2314 | goto unlock; |
2315 | |
2316 | usleep_range(min: 1000, max: 1500); |
2317 | } |
2318 | |
2319 | ret = -EBUSY; |
2320 | |
2321 | unlock: |
2322 | mutex_unlock(lock: &pdata->rss_mutex); |
2323 | |
2324 | return ret; |
2325 | } |
2326 | |
2327 | static int (struct xlgmac_pdata *pdata) |
2328 | { |
2329 | unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); |
2330 | unsigned int *key = (unsigned int *)&pdata->rss_key; |
2331 | int ret; |
2332 | |
2333 | while (key_regs--) { |
2334 | ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, |
2335 | index: key_regs, val: *key++); |
2336 | if (ret) |
2337 | return ret; |
2338 | } |
2339 | |
2340 | return 0; |
2341 | } |
2342 | |
2343 | static int (struct xlgmac_pdata *pdata) |
2344 | { |
2345 | unsigned int i; |
2346 | int ret; |
2347 | |
2348 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { |
2349 | ret = xlgmac_write_rss_reg(pdata, |
2350 | XLGMAC_RSS_LOOKUP_TABLE_TYPE, index: i, |
2351 | val: pdata->rss_table[i]); |
2352 | if (ret) |
2353 | return ret; |
2354 | } |
2355 | |
2356 | return 0; |
2357 | } |
2358 | |
2359 | static int (struct xlgmac_pdata *pdata, const u8 *key) |
2360 | { |
2361 | memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); |
2362 | |
2363 | return xlgmac_write_rss_hash_key(pdata); |
2364 | } |
2365 | |
2366 | static int (struct xlgmac_pdata *pdata, |
2367 | const u32 *table) |
2368 | { |
2369 | unsigned int i; |
2370 | u32 tval; |
2371 | |
2372 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { |
2373 | tval = table[i]; |
2374 | pdata->rss_table[i] = XLGMAC_SET_REG_BITS( |
2375 | pdata->rss_table[i], |
2376 | MAC_RSSDR_DMCH_POS, |
2377 | MAC_RSSDR_DMCH_LEN, |
2378 | tval); |
2379 | } |
2380 | |
2381 | return xlgmac_write_rss_lookup_table(pdata); |
2382 | } |
2383 | |
2384 | static int (struct xlgmac_pdata *pdata) |
2385 | { |
2386 | u32 regval; |
2387 | int ret; |
2388 | |
2389 | if (!pdata->hw_feat.rss) |
2390 | return -EOPNOTSUPP; |
2391 | |
2392 | /* Program the hash key */ |
2393 | ret = xlgmac_write_rss_hash_key(pdata); |
2394 | if (ret) |
2395 | return ret; |
2396 | |
2397 | /* Program the lookup table */ |
2398 | ret = xlgmac_write_rss_lookup_table(pdata); |
2399 | if (ret) |
2400 | return ret; |
2401 | |
2402 | /* Set the RSS options */ |
2403 | writel(val: pdata->rss_options, addr: pdata->mac_regs + MAC_RSSCR); |
2404 | |
2405 | /* Enable RSS */ |
2406 | regval = readl(addr: pdata->mac_regs + MAC_RSSCR); |
2407 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, |
2408 | MAC_RSSCR_RSSE_LEN, 1); |
2409 | writel(val: regval, addr: pdata->mac_regs + MAC_RSSCR); |
2410 | |
2411 | return 0; |
2412 | } |
2413 | |
2414 | static int (struct xlgmac_pdata *pdata) |
2415 | { |
2416 | u32 regval; |
2417 | |
2418 | if (!pdata->hw_feat.rss) |
2419 | return -EOPNOTSUPP; |
2420 | |
2421 | regval = readl(addr: pdata->mac_regs + MAC_RSSCR); |
2422 | regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, |
2423 | MAC_RSSCR_RSSE_LEN, 0); |
2424 | writel(val: regval, addr: pdata->mac_regs + MAC_RSSCR); |
2425 | |
2426 | return 0; |
2427 | } |
2428 | |
2429 | static void (struct xlgmac_pdata *pdata) |
2430 | { |
2431 | int ret; |
2432 | |
2433 | if (!pdata->hw_feat.rss) |
2434 | return; |
2435 | |
2436 | if (pdata->netdev->features & NETIF_F_RXHASH) |
2437 | ret = xlgmac_enable_rss(pdata); |
2438 | else |
2439 | ret = xlgmac_disable_rss(pdata); |
2440 | |
2441 | if (ret) |
2442 | netdev_err(dev: pdata->netdev, |
2443 | format: "error configuring RSS, RSS disabled\n" ); |
2444 | } |
2445 | |
2446 | static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) |
2447 | { |
2448 | unsigned int dma_ch_isr, dma_ch_ier; |
2449 | struct xlgmac_channel *channel; |
2450 | unsigned int i; |
2451 | |
2452 | channel = pdata->channel_head; |
2453 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
2454 | /* Clear all the interrupts which are set */ |
2455 | dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); |
2456 | writel(val: dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); |
2457 | |
2458 | /* Clear all interrupt enable bits */ |
2459 | dma_ch_ier = 0; |
2460 | |
2461 | /* Enable following interrupts |
2462 | * NIE - Normal Interrupt Summary Enable |
2463 | * AIE - Abnormal Interrupt Summary Enable |
2464 | * FBEE - Fatal Bus Error Enable |
2465 | */ |
2466 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, |
2467 | DMA_CH_IER_NIE_POS, |
2468 | DMA_CH_IER_NIE_LEN, 1); |
2469 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, |
2470 | DMA_CH_IER_AIE_POS, |
2471 | DMA_CH_IER_AIE_LEN, 1); |
2472 | dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, |
2473 | DMA_CH_IER_FBEE_POS, |
2474 | DMA_CH_IER_FBEE_LEN, 1); |
2475 | |
2476 | if (channel->tx_ring) { |
2477 | /* Enable the following Tx interrupts |
2478 | * TIE - Transmit Interrupt Enable (unless using |
2479 | * per channel interrupts) |
2480 | */ |
2481 | if (!pdata->per_channel_irq) |
2482 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2483 | dma_ch_ier, |
2484 | DMA_CH_IER_TIE_POS, |
2485 | DMA_CH_IER_TIE_LEN, |
2486 | 1); |
2487 | } |
2488 | if (channel->rx_ring) { |
2489 | /* Enable following Rx interrupts |
2490 | * RBUE - Receive Buffer Unavailable Enable |
2491 | * RIE - Receive Interrupt Enable (unless using |
2492 | * per channel interrupts) |
2493 | */ |
2494 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2495 | dma_ch_ier, |
2496 | DMA_CH_IER_RBUE_POS, |
2497 | DMA_CH_IER_RBUE_LEN, |
2498 | 1); |
2499 | if (!pdata->per_channel_irq) |
2500 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2501 | dma_ch_ier, |
2502 | DMA_CH_IER_RIE_POS, |
2503 | DMA_CH_IER_RIE_LEN, |
2504 | 1); |
2505 | } |
2506 | |
2507 | writel(val: dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER)); |
2508 | } |
2509 | } |
2510 | |
2511 | static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata) |
2512 | { |
2513 | unsigned int q_count, i; |
2514 | unsigned int mtl_q_isr; |
2515 | |
2516 | q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); |
2517 | for (i = 0; i < q_count; i++) { |
2518 | /* Clear all the interrupts which are set */ |
2519 | mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); |
2520 | writel(val: mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); |
2521 | |
2522 | /* No MTL interrupts to be enabled */ |
2523 | writel(val: 0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER)); |
2524 | } |
2525 | } |
2526 | |
2527 | static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata) |
2528 | { |
2529 | unsigned int mac_ier = 0; |
2530 | u32 regval; |
2531 | |
2532 | /* Enable Timestamp interrupt */ |
2533 | mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, |
2534 | MAC_IER_TSIE_LEN, 1); |
2535 | |
2536 | writel(val: mac_ier, addr: pdata->mac_regs + MAC_IER); |
2537 | |
2538 | /* Enable all counter interrupts */ |
2539 | regval = readl(addr: pdata->mac_regs + MMC_RIER); |
2540 | regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, |
2541 | MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); |
2542 | writel(val: regval, addr: pdata->mac_regs + MMC_RIER); |
2543 | regval = readl(addr: pdata->mac_regs + MMC_TIER); |
2544 | regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, |
2545 | MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); |
2546 | writel(val: regval, addr: pdata->mac_regs + MMC_TIER); |
2547 | } |
2548 | |
2549 | static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata) |
2550 | { |
2551 | u32 regval; |
2552 | |
2553 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), |
2554 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); |
2555 | if (regval == 0x1) |
2556 | return 0; |
2557 | |
2558 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
2559 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, |
2560 | MAC_TCR_SS_LEN, 0x1); |
2561 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
2562 | |
2563 | return 0; |
2564 | } |
2565 | |
2566 | static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata) |
2567 | { |
2568 | u32 regval; |
2569 | |
2570 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), |
2571 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); |
2572 | if (regval == 0) |
2573 | return 0; |
2574 | |
2575 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
2576 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, |
2577 | MAC_TCR_SS_LEN, 0); |
2578 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
2579 | |
2580 | return 0; |
2581 | } |
2582 | |
2583 | static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata) |
2584 | { |
2585 | u32 regval; |
2586 | |
2587 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), |
2588 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); |
2589 | if (regval == 0x2) |
2590 | return 0; |
2591 | |
2592 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
2593 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, |
2594 | MAC_TCR_SS_LEN, 0x2); |
2595 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
2596 | |
2597 | return 0; |
2598 | } |
2599 | |
2600 | static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata) |
2601 | { |
2602 | u32 regval; |
2603 | |
2604 | regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), |
2605 | MAC_TCR_SS_POS, MAC_TCR_SS_LEN); |
2606 | if (regval == 0x3) |
2607 | return 0; |
2608 | |
2609 | regval = readl(addr: pdata->mac_regs + MAC_TCR); |
2610 | regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, |
2611 | MAC_TCR_SS_LEN, 0x3); |
2612 | writel(val: regval, addr: pdata->mac_regs + MAC_TCR); |
2613 | |
2614 | return 0; |
2615 | } |
2616 | |
2617 | static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata) |
2618 | { |
2619 | switch (pdata->phy_speed) { |
2620 | case SPEED_100000: |
2621 | xlgmac_set_xlgmii_100000_speed(pdata); |
2622 | break; |
2623 | |
2624 | case SPEED_50000: |
2625 | xlgmac_set_xlgmii_50000_speed(pdata); |
2626 | break; |
2627 | |
2628 | case SPEED_40000: |
2629 | xlgmac_set_xlgmii_40000_speed(pdata); |
2630 | break; |
2631 | |
2632 | case SPEED_25000: |
2633 | xlgmac_set_xlgmii_25000_speed(pdata); |
2634 | break; |
2635 | } |
2636 | } |
2637 | |
2638 | static int xlgmac_dev_read(struct xlgmac_channel *channel) |
2639 | { |
2640 | struct xlgmac_pdata *pdata = channel->pdata; |
2641 | struct xlgmac_ring *ring = channel->rx_ring; |
2642 | struct net_device *netdev = pdata->netdev; |
2643 | struct xlgmac_desc_data *desc_data; |
2644 | struct xlgmac_dma_desc *dma_desc; |
2645 | struct xlgmac_pkt_info *pkt_info; |
2646 | unsigned int err, etlt, l34t; |
2647 | |
2648 | desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); |
2649 | dma_desc = desc_data->dma_desc; |
2650 | pkt_info = &ring->pkt_info; |
2651 | |
2652 | /* Check for data availability */ |
2653 | if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2654 | RX_NORMAL_DESC3_OWN_POS, |
2655 | RX_NORMAL_DESC3_OWN_LEN)) |
2656 | return 1; |
2657 | |
2658 | /* Make sure descriptor fields are read after reading the OWN bit */ |
2659 | dma_rmb(); |
2660 | |
2661 | if (netif_msg_rx_status(pdata)) |
2662 | xlgmac_dump_rx_desc(pdata, ring, idx: ring->cur); |
2663 | |
2664 | if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2665 | RX_NORMAL_DESC3_CTXT_POS, |
2666 | RX_NORMAL_DESC3_CTXT_LEN)) { |
2667 | /* Timestamp Context Descriptor */ |
2668 | xlgmac_get_rx_tstamp(pkt_info, dma_desc); |
2669 | |
2670 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2671 | pkt_info->attributes, |
2672 | RX_PACKET_ATTRIBUTES_CONTEXT_POS, |
2673 | RX_PACKET_ATTRIBUTES_CONTEXT_LEN, |
2674 | 1); |
2675 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2676 | pkt_info->attributes, |
2677 | RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, |
2678 | RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, |
2679 | 0); |
2680 | return 0; |
2681 | } |
2682 | |
2683 | /* Normal Descriptor, be sure Context Descriptor bit is off */ |
2684 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2685 | pkt_info->attributes, |
2686 | RX_PACKET_ATTRIBUTES_CONTEXT_POS, |
2687 | RX_PACKET_ATTRIBUTES_CONTEXT_LEN, |
2688 | 0); |
2689 | |
2690 | /* Indicate if a Context Descriptor is next */ |
2691 | if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2692 | RX_NORMAL_DESC3_CDA_POS, |
2693 | RX_NORMAL_DESC3_CDA_LEN)) |
2694 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2695 | pkt_info->attributes, |
2696 | RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, |
2697 | RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, |
2698 | 1); |
2699 | |
2700 | /* Get the header length */ |
2701 | if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2702 | RX_NORMAL_DESC3_FD_POS, |
2703 | RX_NORMAL_DESC3_FD_LEN)) { |
2704 | desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2, |
2705 | RX_NORMAL_DESC2_HL_POS, |
2706 | RX_NORMAL_DESC2_HL_LEN); |
2707 | if (desc_data->rx.hdr_len) |
2708 | pdata->stats.rx_split_header_packets++; |
2709 | } |
2710 | |
2711 | /* Get the RSS hash */ |
2712 | if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2713 | RX_NORMAL_DESC3_RSV_POS, |
2714 | RX_NORMAL_DESC3_RSV_LEN)) { |
2715 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2716 | pkt_info->attributes, |
2717 | RX_PACKET_ATTRIBUTES_RSS_HASH_POS, |
2718 | RX_PACKET_ATTRIBUTES_RSS_HASH_LEN, |
2719 | 1); |
2720 | |
2721 | pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1); |
2722 | |
2723 | l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2724 | RX_NORMAL_DESC3_L34T_POS, |
2725 | RX_NORMAL_DESC3_L34T_LEN); |
2726 | switch (l34t) { |
2727 | case RX_DESC3_L34T_IPV4_TCP: |
2728 | case RX_DESC3_L34T_IPV4_UDP: |
2729 | case RX_DESC3_L34T_IPV6_TCP: |
2730 | case RX_DESC3_L34T_IPV6_UDP: |
2731 | pkt_info->rss_hash_type = PKT_HASH_TYPE_L4; |
2732 | break; |
2733 | default: |
2734 | pkt_info->rss_hash_type = PKT_HASH_TYPE_L3; |
2735 | } |
2736 | } |
2737 | |
2738 | /* Get the pkt_info length */ |
2739 | desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2740 | RX_NORMAL_DESC3_PL_POS, |
2741 | RX_NORMAL_DESC3_PL_LEN); |
2742 | |
2743 | if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2744 | RX_NORMAL_DESC3_LD_POS, |
2745 | RX_NORMAL_DESC3_LD_LEN)) { |
2746 | /* Not all the data has been transferred for this pkt_info */ |
2747 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2748 | pkt_info->attributes, |
2749 | RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, |
2750 | RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, |
2751 | 1); |
2752 | return 0; |
2753 | } |
2754 | |
2755 | /* This is the last of the data for this pkt_info */ |
2756 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2757 | pkt_info->attributes, |
2758 | RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, |
2759 | RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, |
2760 | 0); |
2761 | |
2762 | /* Set checksum done indicator as appropriate */ |
2763 | if (netdev->features & NETIF_F_RXCSUM) |
2764 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2765 | pkt_info->attributes, |
2766 | RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, |
2767 | RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, |
2768 | 1); |
2769 | |
2770 | /* Check for errors (only valid in last descriptor) */ |
2771 | err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2772 | RX_NORMAL_DESC3_ES_POS, |
2773 | RX_NORMAL_DESC3_ES_LEN); |
2774 | etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, |
2775 | RX_NORMAL_DESC3_ETLT_POS, |
2776 | RX_NORMAL_DESC3_ETLT_LEN); |
2777 | netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n" , err, etlt); |
2778 | |
2779 | if (!err || !etlt) { |
2780 | /* No error if err is 0 or etlt is 0 */ |
2781 | if ((etlt == 0x09) && |
2782 | (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
2783 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2784 | pkt_info->attributes, |
2785 | RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, |
2786 | RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, |
2787 | 1); |
2788 | pkt_info->vlan_ctag = |
2789 | XLGMAC_GET_REG_BITS_LE(dma_desc->desc0, |
2790 | RX_NORMAL_DESC0_OVT_POS, |
2791 | RX_NORMAL_DESC0_OVT_LEN); |
2792 | netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n" , |
2793 | pkt_info->vlan_ctag); |
2794 | } |
2795 | } else { |
2796 | if ((etlt == 0x05) || (etlt == 0x06)) |
2797 | pkt_info->attributes = XLGMAC_SET_REG_BITS( |
2798 | pkt_info->attributes, |
2799 | RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, |
2800 | RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, |
2801 | 0); |
2802 | else |
2803 | pkt_info->errors = XLGMAC_SET_REG_BITS( |
2804 | pkt_info->errors, |
2805 | RX_PACKET_ERRORS_FRAME_POS, |
2806 | RX_PACKET_ERRORS_FRAME_LEN, |
2807 | 1); |
2808 | } |
2809 | |
2810 | XLGMAC_PR("%s - descriptor=%u (cur=%d)\n" , channel->name, |
2811 | ring->cur & (ring->dma_desc_count - 1), ring->cur); |
2812 | |
2813 | return 0; |
2814 | } |
2815 | |
2816 | static int xlgmac_enable_int(struct xlgmac_channel *channel, |
2817 | enum xlgmac_int int_id) |
2818 | { |
2819 | unsigned int dma_ch_ier; |
2820 | |
2821 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); |
2822 | |
2823 | switch (int_id) { |
2824 | case XLGMAC_INT_DMA_CH_SR_TI: |
2825 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2826 | dma_ch_ier, DMA_CH_IER_TIE_POS, |
2827 | DMA_CH_IER_TIE_LEN, 1); |
2828 | break; |
2829 | case XLGMAC_INT_DMA_CH_SR_TPS: |
2830 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2831 | dma_ch_ier, DMA_CH_IER_TXSE_POS, |
2832 | DMA_CH_IER_TXSE_LEN, 1); |
2833 | break; |
2834 | case XLGMAC_INT_DMA_CH_SR_TBU: |
2835 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2836 | dma_ch_ier, DMA_CH_IER_TBUE_POS, |
2837 | DMA_CH_IER_TBUE_LEN, 1); |
2838 | break; |
2839 | case XLGMAC_INT_DMA_CH_SR_RI: |
2840 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2841 | dma_ch_ier, DMA_CH_IER_RIE_POS, |
2842 | DMA_CH_IER_RIE_LEN, 1); |
2843 | break; |
2844 | case XLGMAC_INT_DMA_CH_SR_RBU: |
2845 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2846 | dma_ch_ier, DMA_CH_IER_RBUE_POS, |
2847 | DMA_CH_IER_RBUE_LEN, 1); |
2848 | break; |
2849 | case XLGMAC_INT_DMA_CH_SR_RPS: |
2850 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2851 | dma_ch_ier, DMA_CH_IER_RSE_POS, |
2852 | DMA_CH_IER_RSE_LEN, 1); |
2853 | break; |
2854 | case XLGMAC_INT_DMA_CH_SR_TI_RI: |
2855 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2856 | dma_ch_ier, DMA_CH_IER_TIE_POS, |
2857 | DMA_CH_IER_TIE_LEN, 1); |
2858 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2859 | dma_ch_ier, DMA_CH_IER_RIE_POS, |
2860 | DMA_CH_IER_RIE_LEN, 1); |
2861 | break; |
2862 | case XLGMAC_INT_DMA_CH_SR_FBE: |
2863 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2864 | dma_ch_ier, DMA_CH_IER_FBEE_POS, |
2865 | DMA_CH_IER_FBEE_LEN, 1); |
2866 | break; |
2867 | case XLGMAC_INT_DMA_ALL: |
2868 | dma_ch_ier |= channel->saved_ier; |
2869 | break; |
2870 | default: |
2871 | return -1; |
2872 | } |
2873 | |
2874 | writel(val: dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); |
2875 | |
2876 | return 0; |
2877 | } |
2878 | |
2879 | static int xlgmac_disable_int(struct xlgmac_channel *channel, |
2880 | enum xlgmac_int int_id) |
2881 | { |
2882 | unsigned int dma_ch_ier; |
2883 | |
2884 | dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); |
2885 | |
2886 | switch (int_id) { |
2887 | case XLGMAC_INT_DMA_CH_SR_TI: |
2888 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2889 | dma_ch_ier, DMA_CH_IER_TIE_POS, |
2890 | DMA_CH_IER_TIE_LEN, 0); |
2891 | break; |
2892 | case XLGMAC_INT_DMA_CH_SR_TPS: |
2893 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2894 | dma_ch_ier, DMA_CH_IER_TXSE_POS, |
2895 | DMA_CH_IER_TXSE_LEN, 0); |
2896 | break; |
2897 | case XLGMAC_INT_DMA_CH_SR_TBU: |
2898 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2899 | dma_ch_ier, DMA_CH_IER_TBUE_POS, |
2900 | DMA_CH_IER_TBUE_LEN, 0); |
2901 | break; |
2902 | case XLGMAC_INT_DMA_CH_SR_RI: |
2903 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2904 | dma_ch_ier, DMA_CH_IER_RIE_POS, |
2905 | DMA_CH_IER_RIE_LEN, 0); |
2906 | break; |
2907 | case XLGMAC_INT_DMA_CH_SR_RBU: |
2908 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2909 | dma_ch_ier, DMA_CH_IER_RBUE_POS, |
2910 | DMA_CH_IER_RBUE_LEN, 0); |
2911 | break; |
2912 | case XLGMAC_INT_DMA_CH_SR_RPS: |
2913 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2914 | dma_ch_ier, DMA_CH_IER_RSE_POS, |
2915 | DMA_CH_IER_RSE_LEN, 0); |
2916 | break; |
2917 | case XLGMAC_INT_DMA_CH_SR_TI_RI: |
2918 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2919 | dma_ch_ier, DMA_CH_IER_TIE_POS, |
2920 | DMA_CH_IER_TIE_LEN, 0); |
2921 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2922 | dma_ch_ier, DMA_CH_IER_RIE_POS, |
2923 | DMA_CH_IER_RIE_LEN, 0); |
2924 | break; |
2925 | case XLGMAC_INT_DMA_CH_SR_FBE: |
2926 | dma_ch_ier = XLGMAC_SET_REG_BITS( |
2927 | dma_ch_ier, DMA_CH_IER_FBEE_POS, |
2928 | DMA_CH_IER_FBEE_LEN, 0); |
2929 | break; |
2930 | case XLGMAC_INT_DMA_ALL: |
2931 | channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK; |
2932 | dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK; |
2933 | break; |
2934 | default: |
2935 | return -1; |
2936 | } |
2937 | |
2938 | writel(val: dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); |
2939 | |
2940 | return 0; |
2941 | } |
2942 | |
2943 | static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata) |
2944 | { |
2945 | unsigned int i, count; |
2946 | u32 regval; |
2947 | |
2948 | for (i = 0; i < pdata->tx_q_count; i++) { |
2949 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
2950 | regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, |
2951 | MTL_Q_TQOMR_FTQ_LEN, 1); |
2952 | writel(val: regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
2953 | } |
2954 | |
2955 | /* Poll Until Poll Condition */ |
2956 | for (i = 0; i < pdata->tx_q_count; i++) { |
2957 | count = 2000; |
2958 | regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); |
2959 | regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, |
2960 | MTL_Q_TQOMR_FTQ_LEN); |
2961 | while (--count && regval) |
2962 | usleep_range(min: 500, max: 600); |
2963 | |
2964 | if (!count) |
2965 | return -EBUSY; |
2966 | } |
2967 | |
2968 | return 0; |
2969 | } |
2970 | |
2971 | static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata) |
2972 | { |
2973 | u32 regval; |
2974 | |
2975 | regval = readl(addr: pdata->mac_regs + DMA_SBMR); |
2976 | /* Set enhanced addressing mode */ |
2977 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, |
2978 | DMA_SBMR_EAME_LEN, 1); |
2979 | /* Set the System Bus mode */ |
2980 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS, |
2981 | DMA_SBMR_UNDEF_LEN, 1); |
2982 | regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS, |
2983 | DMA_SBMR_BLEN_256_LEN, 1); |
2984 | writel(val: regval, addr: pdata->mac_regs + DMA_SBMR); |
2985 | } |
2986 | |
2987 | static int xlgmac_hw_init(struct xlgmac_pdata *pdata) |
2988 | { |
2989 | struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops; |
2990 | int ret; |
2991 | |
2992 | /* Flush Tx queues */ |
2993 | ret = xlgmac_flush_tx_queues(pdata); |
2994 | if (ret) |
2995 | return ret; |
2996 | |
2997 | /* Initialize DMA related features */ |
2998 | xlgmac_config_dma_bus(pdata); |
2999 | xlgmac_config_osp_mode(pdata); |
3000 | xlgmac_config_pblx8(pdata); |
3001 | xlgmac_config_tx_pbl_val(pdata); |
3002 | xlgmac_config_rx_pbl_val(pdata); |
3003 | xlgmac_config_rx_coalesce(pdata); |
3004 | xlgmac_config_tx_coalesce(pdata); |
3005 | xlgmac_config_rx_buffer_size(pdata); |
3006 | xlgmac_config_tso_mode(pdata); |
3007 | xlgmac_config_sph_mode(pdata); |
3008 | xlgmac_config_rss(pdata); |
3009 | desc_ops->tx_desc_init(pdata); |
3010 | desc_ops->rx_desc_init(pdata); |
3011 | xlgmac_enable_dma_interrupts(pdata); |
3012 | |
3013 | /* Initialize MTL related features */ |
3014 | xlgmac_config_mtl_mode(pdata); |
3015 | xlgmac_config_queue_mapping(pdata); |
3016 | xlgmac_config_tsf_mode(pdata, val: pdata->tx_sf_mode); |
3017 | xlgmac_config_rsf_mode(pdata, val: pdata->rx_sf_mode); |
3018 | xlgmac_config_tx_threshold(pdata, val: pdata->tx_threshold); |
3019 | xlgmac_config_rx_threshold(pdata, val: pdata->rx_threshold); |
3020 | xlgmac_config_tx_fifo_size(pdata); |
3021 | xlgmac_config_rx_fifo_size(pdata); |
3022 | xlgmac_config_flow_control_threshold(pdata); |
3023 | xlgmac_config_rx_fep_enable(pdata); |
3024 | xlgmac_config_rx_fup_enable(pdata); |
3025 | xlgmac_enable_mtl_interrupts(pdata); |
3026 | |
3027 | /* Initialize MAC related features */ |
3028 | xlgmac_config_mac_address(pdata); |
3029 | xlgmac_config_rx_mode(pdata); |
3030 | xlgmac_config_jumbo_enable(pdata); |
3031 | xlgmac_config_flow_control(pdata); |
3032 | xlgmac_config_mac_speed(pdata); |
3033 | xlgmac_config_checksum_offload(pdata); |
3034 | xlgmac_config_vlan_support(pdata); |
3035 | xlgmac_config_mmc(pdata); |
3036 | xlgmac_enable_mac_interrupts(pdata); |
3037 | |
3038 | return 0; |
3039 | } |
3040 | |
3041 | static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) |
3042 | { |
3043 | unsigned int count = 2000; |
3044 | u32 regval; |
3045 | |
3046 | /* Issue a software reset */ |
3047 | regval = readl(addr: pdata->mac_regs + DMA_MR); |
3048 | regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, |
3049 | DMA_MR_SWR_LEN, 1); |
3050 | writel(val: regval, addr: pdata->mac_regs + DMA_MR); |
3051 | usleep_range(min: 10, max: 15); |
3052 | |
3053 | /* Poll Until Poll Condition */ |
3054 | while (--count && |
3055 | XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), |
3056 | DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) |
3057 | usleep_range(min: 500, max: 600); |
3058 | |
3059 | if (!count) |
3060 | return -EBUSY; |
3061 | |
3062 | return 0; |
3063 | } |
3064 | |
3065 | void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops) |
3066 | { |
3067 | hw_ops->init = xlgmac_hw_init; |
3068 | hw_ops->exit = xlgmac_hw_exit; |
3069 | |
3070 | hw_ops->tx_complete = xlgmac_tx_complete; |
3071 | |
3072 | hw_ops->enable_tx = xlgmac_enable_tx; |
3073 | hw_ops->disable_tx = xlgmac_disable_tx; |
3074 | hw_ops->enable_rx = xlgmac_enable_rx; |
3075 | hw_ops->disable_rx = xlgmac_disable_rx; |
3076 | |
3077 | hw_ops->dev_xmit = xlgmac_dev_xmit; |
3078 | hw_ops->dev_read = xlgmac_dev_read; |
3079 | hw_ops->enable_int = xlgmac_enable_int; |
3080 | hw_ops->disable_int = xlgmac_disable_int; |
3081 | |
3082 | hw_ops->set_mac_address = xlgmac_set_mac_address; |
3083 | hw_ops->config_rx_mode = xlgmac_config_rx_mode; |
3084 | hw_ops->enable_rx_csum = xlgmac_enable_rx_csum; |
3085 | hw_ops->disable_rx_csum = xlgmac_disable_rx_csum; |
3086 | |
3087 | /* For MII speed configuration */ |
3088 | hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed; |
3089 | hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed; |
3090 | hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed; |
3091 | hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed; |
3092 | |
3093 | /* For descriptor related operation */ |
3094 | hw_ops->tx_desc_init = xlgmac_tx_desc_init; |
3095 | hw_ops->rx_desc_init = xlgmac_rx_desc_init; |
3096 | hw_ops->tx_desc_reset = xlgmac_tx_desc_reset; |
3097 | hw_ops->rx_desc_reset = xlgmac_rx_desc_reset; |
3098 | hw_ops->is_last_desc = xlgmac_is_last_desc; |
3099 | hw_ops->is_context_desc = xlgmac_is_context_desc; |
3100 | hw_ops->tx_start_xmit = xlgmac_tx_start_xmit; |
3101 | |
3102 | /* For Flow Control */ |
3103 | hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control; |
3104 | hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control; |
3105 | |
3106 | /* For Vlan related config */ |
3107 | hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping; |
3108 | hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping; |
3109 | hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering; |
3110 | hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering; |
3111 | hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table; |
3112 | |
3113 | /* For RX coalescing */ |
3114 | hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce; |
3115 | hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce; |
3116 | hw_ops->usec_to_riwt = xlgmac_usec_to_riwt; |
3117 | hw_ops->riwt_to_usec = xlgmac_riwt_to_usec; |
3118 | |
3119 | /* For RX and TX threshold config */ |
3120 | hw_ops->config_rx_threshold = xlgmac_config_rx_threshold; |
3121 | hw_ops->config_tx_threshold = xlgmac_config_tx_threshold; |
3122 | |
3123 | /* For RX and TX Store and Forward Mode config */ |
3124 | hw_ops->config_rsf_mode = xlgmac_config_rsf_mode; |
3125 | hw_ops->config_tsf_mode = xlgmac_config_tsf_mode; |
3126 | |
3127 | /* For TX DMA Operating on Second Frame config */ |
3128 | hw_ops->config_osp_mode = xlgmac_config_osp_mode; |
3129 | |
3130 | /* For RX and TX PBL config */ |
3131 | hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val; |
3132 | hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val; |
3133 | hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val; |
3134 | hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val; |
3135 | hw_ops->config_pblx8 = xlgmac_config_pblx8; |
3136 | |
3137 | /* For MMC statistics support */ |
3138 | hw_ops->tx_mmc_int = xlgmac_tx_mmc_int; |
3139 | hw_ops->rx_mmc_int = xlgmac_rx_mmc_int; |
3140 | hw_ops->read_mmc_stats = xlgmac_read_mmc_stats; |
3141 | |
3142 | /* For Receive Side Scaling */ |
3143 | hw_ops->enable_rss = xlgmac_enable_rss; |
3144 | hw_ops->disable_rss = xlgmac_disable_rss; |
3145 | hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key; |
3146 | hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table; |
3147 | } |
3148 | |