1 | /* |
2 | * AMD 10Gb Ethernet driver |
3 | * |
4 | * This file is available to you under your choice of the following two |
5 | * licenses: |
6 | * |
7 | * License 1: GPLv2 |
8 | * |
9 | * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. |
10 | * |
11 | * This file is free software; you may copy, redistribute and/or modify |
12 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation, either version 2 of the License, or (at |
14 | * your option) any later version. |
15 | * |
16 | * This file is distributed in the hope that it will be useful, but |
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 | * General Public License for more details. |
20 | * |
21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
23 | * |
24 | * This file incorporates work covered by the following copyright and |
25 | * permission notice: |
26 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation |
27 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, |
28 | * Inc. unless otherwise expressly agreed to in writing between Synopsys |
29 | * and you. |
30 | * |
31 | * The Software IS NOT an item of Licensed Software or Licensed Product |
32 | * under any End User Software License Agreement or Agreement for Licensed |
33 | * Product with Synopsys or any supplement thereto. Permission is hereby |
34 | * granted, free of charge, to any person obtaining a copy of this software |
35 | * annotated with this license and the Software, to deal in the Software |
36 | * without restriction, including without limitation the rights to use, |
37 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
38 | * of the Software, and to permit persons to whom the Software is furnished |
39 | * to do so, subject to the following conditions: |
40 | * |
41 | * The above copyright notice and this permission notice shall be included |
42 | * in all copies or substantial portions of the Software. |
43 | * |
44 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" |
45 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
46 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
47 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS |
48 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
49 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
50 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
51 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
52 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
53 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
54 | * THE POSSIBILITY OF SUCH DAMAGE. |
55 | * |
56 | * |
57 | * License 2: Modified BSD |
58 | * |
59 | * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. |
60 | * All rights reserved. |
61 | * |
62 | * Redistribution and use in source and binary forms, with or without |
63 | * modification, are permitted provided that the following conditions are met: |
64 | * * Redistributions of source code must retain the above copyright |
65 | * notice, this list of conditions and the following disclaimer. |
66 | * * Redistributions in binary form must reproduce the above copyright |
67 | * notice, this list of conditions and the following disclaimer in the |
68 | * documentation and/or other materials provided with the distribution. |
69 | * * Neither the name of Advanced Micro Devices, Inc. nor the |
70 | * names of its contributors may be used to endorse or promote products |
71 | * derived from this software without specific prior written permission. |
72 | * |
73 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
74 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
75 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
76 | * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY |
77 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
78 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
79 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
80 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
81 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
82 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
83 | * |
84 | * This file incorporates work covered by the following copyright and |
85 | * permission notice: |
86 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation |
87 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, |
88 | * Inc. unless otherwise expressly agreed to in writing between Synopsys |
89 | * and you. |
90 | * |
91 | * The Software IS NOT an item of Licensed Software or Licensed Product |
92 | * under any End User Software License Agreement or Agreement for Licensed |
93 | * Product with Synopsys or any supplement thereto. Permission is hereby |
94 | * granted, free of charge, to any person obtaining a copy of this software |
95 | * annotated with this license and the Software, to deal in the Software |
96 | * without restriction, including without limitation the rights to use, |
97 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
98 | * of the Software, and to permit persons to whom the Software is furnished |
99 | * to do so, subject to the following conditions: |
100 | * |
101 | * The above copyright notice and this permission notice shall be included |
102 | * in all copies or substantial portions of the Software. |
103 | * |
104 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" |
105 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
106 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
107 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS |
108 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
109 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
110 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
111 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
112 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
113 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
114 | * THE POSSIBILITY OF SUCH DAMAGE. |
115 | */ |
116 | |
117 | #include <linux/phy.h> |
118 | #include <linux/mdio.h> |
119 | #include <linux/clk.h> |
120 | #include <linux/bitrev.h> |
121 | #include <linux/crc32.h> |
122 | #include <linux/crc32poly.h> |
123 | |
124 | #include "xgbe.h" |
125 | #include "xgbe-common.h" |
126 | |
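/* Maximum on-wire frame size for the current MTU, including the
 * Ethernet header, FCS and a single VLAN tag.
 */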
127 | static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) |
128 | { |
129 | return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
130 | } |
131 | |
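/* Convert between microseconds and the Rx interrupt watchdog timer
 * (RIWT) units used by the DMA channel.  One RIWT unit corresponds to
 * 256 system clock cycles; for example, assuming a 125 MHz system
 * clock, 30 usec maps to (30 * 125) / 256 = 14 RIWT units.
 */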
132 | static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, |
133 | unsigned int usec) |
134 | { |
135 | unsigned long rate; |
136 | unsigned int ret; |
137 | |
	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");
151 | |
152 | return ret; |
153 | } |
154 | |
155 | static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, |
156 | unsigned int riwt) |
157 | { |
158 | unsigned long rate; |
159 | unsigned int ret; |
160 | |
	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");
174 | |
175 | return ret; |
176 | } |
177 | |
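/* Program the DMA programmable burst length (PBL) for every channel.
 * Values above 32 cannot be encoded directly, so the hardware PBLx8
 * multiplier is enabled and the requested value is divided by 8
 * (e.g. a requested PBL of 256 is written as 32 with PBLx8 set).
 */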
178 | static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata) |
179 | { |
180 | unsigned int pblx8, pbl; |
181 | unsigned int i; |
182 | |
183 | pblx8 = DMA_PBL_X8_DISABLE; |
184 | pbl = pdata->pbl; |
185 | |
186 | if (pdata->pbl > 32) { |
187 | pblx8 = DMA_PBL_X8_ENABLE; |
188 | pbl >>= 3; |
189 | } |
190 | |
191 | for (i = 0; i < pdata->channel_count; i++) { |
192 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8, |
193 | pblx8); |
194 | |
195 | if (pdata->channel[i]->tx_ring) |
196 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, |
197 | PBL, pbl); |
198 | |
199 | if (pdata->channel[i]->rx_ring) |
200 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, |
201 | PBL, pbl); |
202 | } |
203 | |
204 | return 0; |
205 | } |
206 | |
207 | static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) |
208 | { |
209 | unsigned int i; |
210 | |
211 | for (i = 0; i < pdata->channel_count; i++) { |
212 | if (!pdata->channel[i]->tx_ring) |
213 | break; |
214 | |
215 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP, |
216 | pdata->tx_osp_mode); |
217 | } |
218 | |
219 | return 0; |
220 | } |
221 | |
222 | static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) |
223 | { |
224 | unsigned int i; |
225 | |
226 | for (i = 0; i < pdata->rx_q_count; i++) |
227 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); |
228 | |
229 | return 0; |
230 | } |
231 | |
232 | static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) |
233 | { |
234 | unsigned int i; |
235 | |
236 | for (i = 0; i < pdata->tx_q_count; i++) |
237 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); |
238 | |
239 | return 0; |
240 | } |
241 | |
242 | static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, |
243 | unsigned int val) |
244 | { |
245 | unsigned int i; |
246 | |
247 | for (i = 0; i < pdata->rx_q_count; i++) |
248 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); |
249 | |
250 | return 0; |
251 | } |
252 | |
253 | static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, |
254 | unsigned int val) |
255 | { |
256 | unsigned int i; |
257 | |
258 | for (i = 0; i < pdata->tx_q_count; i++) |
259 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); |
260 | |
261 | return 0; |
262 | } |
263 | |
264 | static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) |
265 | { |
266 | unsigned int i; |
267 | |
268 | for (i = 0; i < pdata->channel_count; i++) { |
269 | if (!pdata->channel[i]->rx_ring) |
270 | break; |
271 | |
272 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, |
273 | pdata->rx_riwt); |
274 | } |
275 | |
276 | return 0; |
277 | } |
278 | |
279 | static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) |
280 | { |
281 | return 0; |
282 | } |
283 | |
284 | static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) |
285 | { |
286 | unsigned int i; |
287 | |
288 | for (i = 0; i < pdata->channel_count; i++) { |
289 | if (!pdata->channel[i]->rx_ring) |
290 | break; |
291 | |
292 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, |
293 | pdata->rx_buf_size); |
294 | } |
295 | } |
296 | |
297 | static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) |
298 | { |
299 | unsigned int i; |
300 | |
301 | for (i = 0; i < pdata->channel_count; i++) { |
302 | if (!pdata->channel[i]->tx_ring) |
303 | break; |
304 | |
305 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); |
306 | } |
307 | } |
308 | |
309 | static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) |
310 | { |
311 | unsigned int i; |
312 | |
313 | for (i = 0; i < pdata->channel_count; i++) { |
314 | if (!pdata->channel[i]->rx_ring) |
315 | break; |
316 | |
317 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); |
318 | } |
319 | |
320 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); |
321 | } |
322 | |
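/* Write one word of RSS state (a hash key word or a lookup table
 * entry) through the indirect MAC_RSSAR/MAC_RSSDR interface: place
 * the data in MAC_RSSDR, describe it in MAC_RSSAR, set the OB
 * (operation busy) bit and poll for it to clear.  Returns -EBUSY if
 * a previous operation is still pending or the write does not
 * complete within roughly one second.
 */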
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
325 | { |
326 | unsigned int wait; |
327 | int ret = 0; |
328 | |
329 | mutex_lock(&pdata->rss_mutex); |
330 | |
331 | if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { |
332 | ret = -EBUSY; |
333 | goto unlock; |
334 | } |
335 | |
336 | XGMAC_IOWRITE(pdata, MAC_RSSDR, val); |
337 | |
338 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); |
339 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); |
340 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); |
341 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); |
342 | |
343 | wait = 1000; |
344 | while (wait--) { |
345 | if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) |
346 | goto unlock; |
347 | |
		usleep_range(1000, 1500);
349 | } |
350 | |
351 | ret = -EBUSY; |
352 | |
353 | unlock: |
	mutex_unlock(&pdata->rss_mutex);
355 | |
356 | return ret; |
357 | } |
358 | |
static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
360 | { |
361 | unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); |
362 | unsigned int *key = (unsigned int *)&pdata->rss_key; |
363 | int ret; |
364 | |
365 | while (key_regs--) { |
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
368 | if (ret) |
369 | return ret; |
370 | } |
371 | |
372 | return 0; |
373 | } |
374 | |
static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
376 | { |
377 | unsigned int i; |
378 | int ret; |
379 | |
380 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { |
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
384 | if (ret) |
385 | return ret; |
386 | } |
387 | |
388 | return 0; |
389 | } |
390 | |
static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
392 | { |
393 | memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); |
394 | |
395 | return xgbe_write_rss_hash_key(pdata); |
396 | } |
397 | |
static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
400 | { |
401 | unsigned int i; |
402 | |
403 | for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) |
404 | XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); |
405 | |
406 | return xgbe_write_rss_lookup_table(pdata); |
407 | } |
408 | |
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
410 | { |
411 | int ret; |
412 | |
413 | if (!pdata->hw_feat.rss) |
414 | return -EOPNOTSUPP; |
415 | |
416 | /* Program the hash key */ |
417 | ret = xgbe_write_rss_hash_key(pdata); |
418 | if (ret) |
419 | return ret; |
420 | |
421 | /* Program the lookup table */ |
422 | ret = xgbe_write_rss_lookup_table(pdata); |
423 | if (ret) |
424 | return ret; |
425 | |
426 | /* Set the RSS options */ |
427 | XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); |
428 | |
429 | /* Enable RSS */ |
430 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); |
431 | |
432 | return 0; |
433 | } |
434 | |
static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
436 | { |
437 | if (!pdata->hw_feat.rss) |
438 | return -EOPNOTSUPP; |
439 | |
440 | XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); |
441 | |
442 | return 0; |
443 | } |
444 | |
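/* Synchronize the hardware RSS state with the netdev NETIF_F_RXHASH
 * feature flag; devices without RSS support are left untouched.
 */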
static void xgbe_config_rss(struct xgbe_prv_data *pdata)
446 | { |
447 | int ret; |
448 | |
449 | if (!pdata->hw_feat.rss) |
450 | return; |
451 | |
452 | if (pdata->netdev->features & NETIF_F_RXHASH) |
453 | ret = xgbe_enable_rss(pdata); |
454 | else |
455 | ret = xgbe_disable_rss(pdata); |
456 | |
457 | if (ret) |
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
460 | } |
461 | |
462 | static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata, |
463 | unsigned int queue) |
464 | { |
465 | unsigned int prio, tc; |
466 | |
467 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { |
468 | /* Does this queue handle the priority? */ |
469 | if (pdata->prio2q_map[prio] != queue) |
470 | continue; |
471 | |
472 | /* Get the Traffic Class for this priority */ |
473 | tc = pdata->ets->prio_tc[prio]; |
474 | |
475 | /* Check if PFC is enabled for this traffic class */ |
476 | if (pdata->pfc->pfc_en & (1 << tc)) |
477 | return true; |
478 | } |
479 | |
480 | return false; |
481 | } |
482 | |
483 | static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata) |
484 | { |
485 | /* Program the VXLAN port */ |
486 | XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); |
487 | |
	netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
		  pdata->vxlan_port);
490 | } |
491 | |
492 | static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata) |
493 | { |
494 | if (!pdata->hw_feat.vxn) |
495 | return; |
496 | |
497 | /* Program the VXLAN port */ |
498 | xgbe_set_vxlan_id(pdata); |
499 | |
500 | /* Allow for IPv6/UDP zero-checksum VXLAN packets */ |
501 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1); |
502 | |
503 | /* Enable VXLAN tunneling mode */ |
504 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0); |
505 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1); |
506 | |
	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
508 | } |
509 | |
510 | static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) |
511 | { |
512 | if (!pdata->hw_feat.vxn) |
513 | return; |
514 | |
515 | /* Disable tunneling mode */ |
516 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0); |
517 | |
518 | /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ |
519 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0); |
520 | |
521 | /* Clear the VXLAN port */ |
522 | XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0); |
523 | |
	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
525 | } |
526 | |
527 | static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) |
528 | { |
529 | unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
530 | |
531 | /* From MAC ver 30H the TFCR is per priority, instead of per queue */ |
532 | if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) |
533 | return max_q_count; |
534 | else |
535 | return min_t(unsigned int, pdata->tx_q_count, max_q_count); |
536 | } |
537 | |
538 | static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) |
539 | { |
540 | unsigned int reg, reg_val; |
541 | unsigned int i, q_count; |
542 | |
543 | /* Clear MTL flow control */ |
544 | for (i = 0; i < pdata->rx_q_count; i++) |
545 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); |
546 | |
547 | /* Clear MAC flow control */ |
548 | q_count = xgbe_get_fc_queue_count(pdata); |
549 | reg = MAC_Q0TFCR; |
550 | for (i = 0; i < q_count; i++) { |
551 | reg_val = XGMAC_IOREAD(pdata, reg); |
552 | XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); |
553 | XGMAC_IOWRITE(pdata, reg, reg_val); |
554 | |
555 | reg += MAC_QTFCR_INC; |
556 | } |
557 | |
558 | return 0; |
559 | } |
560 | |
561 | static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) |
562 | { |
563 | struct ieee_pfc *pfc = pdata->pfc; |
564 | struct ieee_ets *ets = pdata->ets; |
565 | unsigned int reg, reg_val; |
566 | unsigned int i, q_count; |
567 | |
568 | /* Set MTL flow control */ |
569 | for (i = 0; i < pdata->rx_q_count; i++) { |
570 | unsigned int ehfc = 0; |
571 | |
572 | if (pdata->rx_rfd[i]) { |
573 | /* Flow control thresholds are established */ |
574 | if (pfc && ets) { |
			if (xgbe_is_pfc_queue(pdata, i))
576 | ehfc = 1; |
577 | } else { |
578 | ehfc = 1; |
579 | } |
580 | } |
581 | |
582 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); |
583 | |
		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
587 | } |
588 | |
589 | /* Set MAC flow control */ |
590 | q_count = xgbe_get_fc_queue_count(pdata); |
591 | reg = MAC_Q0TFCR; |
592 | for (i = 0; i < q_count; i++) { |
593 | reg_val = XGMAC_IOREAD(pdata, reg); |
594 | |
595 | /* Enable transmit flow control */ |
596 | XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); |
597 | /* Set pause time */ |
598 | XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); |
599 | |
600 | XGMAC_IOWRITE(pdata, reg, reg_val); |
601 | |
602 | reg += MAC_QTFCR_INC; |
603 | } |
604 | |
605 | return 0; |
606 | } |
607 | |
608 | static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) |
609 | { |
610 | XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); |
611 | |
612 | return 0; |
613 | } |
614 | |
615 | static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) |
616 | { |
617 | XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); |
618 | |
619 | return 0; |
620 | } |
621 | |
622 | static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) |
623 | { |
624 | struct ieee_pfc *pfc = pdata->pfc; |
625 | |
626 | if (pdata->tx_pause || (pfc && pfc->pfc_en)) |
627 | xgbe_enable_tx_flow_control(pdata); |
628 | else |
629 | xgbe_disable_tx_flow_control(pdata); |
630 | |
631 | return 0; |
632 | } |
633 | |
634 | static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) |
635 | { |
636 | struct ieee_pfc *pfc = pdata->pfc; |
637 | |
638 | if (pdata->rx_pause || (pfc && pfc->pfc_en)) |
639 | xgbe_enable_rx_flow_control(pdata); |
640 | else |
641 | xgbe_disable_rx_flow_control(pdata); |
642 | |
643 | return 0; |
644 | } |
645 | |
646 | static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) |
647 | { |
648 | struct ieee_pfc *pfc = pdata->pfc; |
649 | |
650 | xgbe_config_tx_flow_control(pdata); |
651 | xgbe_config_rx_flow_control(pdata); |
652 | |
653 | XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, |
654 | (pfc && pfc->pfc_en) ? 1 : 0); |
655 | } |
656 | |
657 | static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) |
658 | { |
659 | struct xgbe_channel *channel; |
660 | unsigned int i, ver; |
661 | |
662 | /* Set the interrupt mode if supported */ |
663 | if (pdata->channel_irq_mode) |
664 | XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, |
665 | pdata->channel_irq_mode); |
666 | |
667 | ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); |
668 | |
669 | for (i = 0; i < pdata->channel_count; i++) { |
670 | channel = pdata->channel[i]; |
671 | |
672 | /* Clear all the interrupts which are set */ |
673 | XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, |
674 | XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); |
675 | |
676 | /* Clear all interrupt enable bits */ |
677 | channel->curr_ier = 0; |
678 | |
		/* Enable the following interrupts
680 | * NIE - Normal Interrupt Summary Enable |
681 | * AIE - Abnormal Interrupt Summary Enable |
682 | * FBEE - Fatal Bus Error Enable |
683 | */ |
684 | if (ver < 0x21) { |
685 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); |
686 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); |
687 | } else { |
688 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); |
689 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); |
690 | } |
691 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); |
692 | |
693 | if (channel->tx_ring) { |
694 | /* Enable the following Tx interrupts |
695 | * TIE - Transmit Interrupt Enable (unless using |
696 | * per channel interrupts in edge triggered |
697 | * mode) |
698 | */ |
699 | if (!pdata->per_channel_irq || pdata->channel_irq_mode) |
700 | XGMAC_SET_BITS(channel->curr_ier, |
701 | DMA_CH_IER, TIE, 1); |
702 | } |
703 | if (channel->rx_ring) { |
			/* Enable the following Rx interrupts
705 | * RBUE - Receive Buffer Unavailable Enable |
706 | * RIE - Receive Interrupt Enable (unless using |
707 | * per channel interrupts in edge triggered |
708 | * mode) |
709 | */ |
710 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); |
711 | if (!pdata->per_channel_irq || pdata->channel_irq_mode) |
712 | XGMAC_SET_BITS(channel->curr_ier, |
713 | DMA_CH_IER, RIE, 1); |
714 | } |
715 | |
716 | XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); |
717 | } |
718 | } |
719 | |
720 | static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) |
721 | { |
722 | unsigned int mtl_q_isr; |
723 | unsigned int q_count, i; |
724 | |
725 | q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); |
726 | for (i = 0; i < q_count; i++) { |
727 | /* Clear all the interrupts which are set */ |
728 | mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); |
729 | XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); |
730 | |
731 | /* No MTL interrupts to be enabled */ |
732 | XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); |
733 | } |
734 | } |
735 | |
736 | static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) |
737 | { |
738 | unsigned int mac_ier = 0; |
739 | |
740 | /* Enable Timestamp interrupt */ |
741 | XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); |
742 | |
743 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); |
744 | |
745 | /* Enable all counter interrupts */ |
746 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); |
747 | XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); |
748 | |
749 | /* Enable MDIO single command completion interrupt */ |
750 | XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); |
751 | } |
752 | |
753 | static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata) |
754 | { |
755 | unsigned int ecc_isr, ecc_ier = 0; |
756 | |
757 | if (!pdata->vdata->ecc_support) |
758 | return; |
759 | |
760 | /* Clear all the interrupts which are set */ |
761 | ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR); |
762 | XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr); |
763 | |
764 | /* Enable ECC interrupts */ |
765 | XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1); |
766 | XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1); |
767 | XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1); |
768 | XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1); |
769 | XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1); |
770 | XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1); |
771 | |
772 | XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); |
773 | } |
774 | |
775 | static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata) |
776 | { |
777 | unsigned int ecc_ier; |
778 | |
779 | ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); |
780 | |
781 | /* Disable ECC DED interrupts */ |
782 | XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0); |
783 | XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0); |
784 | XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0); |
785 | |
786 | XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); |
787 | } |
788 | |
789 | static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata, |
790 | enum xgbe_ecc_sec sec) |
791 | { |
792 | unsigned int ecc_ier; |
793 | |
794 | ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); |
795 | |
796 | /* Disable ECC SEC interrupt */ |
797 | switch (sec) { |
798 | case XGBE_ECC_SEC_TX: |
799 | XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0); |
800 | break; |
801 | case XGBE_ECC_SEC_RX: |
802 | XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0); |
803 | break; |
804 | case XGBE_ECC_SEC_DESC: |
805 | XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0); |
806 | break; |
807 | } |
808 | |
809 | XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); |
810 | } |
811 | |
812 | static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) |
813 | { |
814 | unsigned int ss; |
815 | |
816 | switch (speed) { |
817 | case SPEED_10: |
818 | ss = 0x07; |
819 | break; |
820 | case SPEED_1000: |
821 | ss = 0x03; |
822 | break; |
823 | case SPEED_2500: |
824 | ss = 0x02; |
825 | break; |
826 | case SPEED_10000: |
827 | ss = 0x00; |
828 | break; |
829 | default: |
830 | return -EINVAL; |
831 | } |
832 | |
833 | if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) |
834 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); |
835 | |
836 | return 0; |
837 | } |
838 | |
839 | static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) |
840 | { |
841 | /* Put the VLAN tag in the Rx descriptor */ |
842 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); |
843 | |
844 | /* Don't check the VLAN type */ |
845 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); |
846 | |
847 | /* Check only C-TAG (0x8100) packets */ |
848 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); |
849 | |
850 | /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ |
851 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); |
852 | |
853 | /* Enable VLAN tag stripping */ |
854 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); |
855 | |
856 | return 0; |
857 | } |
858 | |
859 | static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) |
860 | { |
861 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); |
862 | |
863 | return 0; |
864 | } |
865 | |
866 | static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) |
867 | { |
868 | /* Enable VLAN filtering */ |
869 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); |
870 | |
871 | /* Enable VLAN Hash Table filtering */ |
872 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); |
873 | |
874 | /* Disable VLAN tag inverse matching */ |
875 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); |
876 | |
877 | /* Only filter on the lower 12-bits of the VLAN tag */ |
878 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); |
879 | |
880 | /* In order for the VLAN Hash Table filtering to be effective, |
881 | * the VLAN tag identifier in the VLAN Tag Register must not |
882 | * be zero. Set the VLAN tag identifier to "1" to enable the |
883 | * VLAN Hash Table filtering. This implies that a VLAN tag of |
884 | * 1 will always pass filtering. |
885 | */ |
886 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); |
887 | |
888 | return 0; |
889 | } |
890 | |
891 | static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) |
892 | { |
893 | /* Disable VLAN filtering */ |
894 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); |
895 | |
896 | return 0; |
897 | } |
898 | |
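/* Compute the CRC32 of a little-endian VLAN ID one bit at a time
 * (only the 12 valid VID bits are hashed).  The caller bit-reverses
 * the result and uses its top four bits to select one of the 16
 * VLAN hash table buckets.
 */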
899 | static u32 xgbe_vid_crc32_le(__le16 vid_le) |
900 | { |
901 | u32 crc = ~0; |
902 | u32 temp = 0; |
903 | unsigned char *data = (unsigned char *)&vid_le; |
904 | unsigned char data_byte = 0; |
905 | int i, bits; |
906 | |
907 | bits = get_bitmask_order(VLAN_VID_MASK); |
908 | for (i = 0; i < bits; i++) { |
909 | if ((i % 8) == 0) |
910 | data_byte = data[i / 8]; |
911 | |
912 | temp = ((crc & 1) ^ data_byte) & 1; |
913 | crc >>= 1; |
914 | data_byte >>= 1; |
915 | |
916 | if (temp) |
917 | crc ^= CRC32_POLY_LE; |
918 | } |
919 | |
920 | return crc; |
921 | } |
922 | |
923 | static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) |
924 | { |
925 | u32 crc; |
926 | u16 vid; |
927 | __le16 vid_le; |
928 | u16 vlan_hash_table = 0; |
929 | |
930 | /* Generate the VLAN Hash Table value */ |
931 | for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { |
932 | /* Get the CRC32 value of the VLAN ID */ |
933 | vid_le = cpu_to_le16(vid); |
934 | crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; |
935 | |
936 | vlan_hash_table |= (1 << crc); |
937 | } |
938 | |
939 | /* Set the VLAN Hash Table filtering register */ |
940 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); |
941 | |
942 | return 0; |
943 | } |
944 | |
945 | static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, |
946 | unsigned int enable) |
947 | { |
948 | unsigned int val = enable ? 1 : 0; |
949 | |
950 | if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) |
951 | return 0; |
952 | |
	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
955 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); |
956 | |
957 | /* Hardware will still perform VLAN filtering in promiscuous mode */ |
958 | if (enable) { |
959 | xgbe_disable_rx_vlan_filtering(pdata); |
960 | } else { |
961 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
962 | xgbe_enable_rx_vlan_filtering(pdata); |
963 | } |
964 | |
965 | return 0; |
966 | } |
967 | |
968 | static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, |
969 | unsigned int enable) |
970 | { |
971 | unsigned int val = enable ? 1 : 0; |
972 | |
973 | if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) |
974 | return 0; |
975 | |
	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
978 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); |
979 | |
980 | return 0; |
981 | } |
982 | |
983 | static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, |
984 | struct netdev_hw_addr *ha, unsigned int *mac_reg) |
985 | { |
986 | unsigned int mac_addr_hi, mac_addr_lo; |
987 | u8 *mac_addr; |
988 | |
989 | mac_addr_lo = 0; |
990 | mac_addr_hi = 0; |
991 | |
992 | if (ha) { |
993 | mac_addr = (u8 *)&mac_addr_lo; |
994 | mac_addr[0] = ha->addr[0]; |
995 | mac_addr[1] = ha->addr[1]; |
996 | mac_addr[2] = ha->addr[2]; |
997 | mac_addr[3] = ha->addr[3]; |
998 | mac_addr = (u8 *)&mac_addr_hi; |
999 | mac_addr[0] = ha->addr[4]; |
1000 | mac_addr[1] = ha->addr[5]; |
1001 | |
		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);
1005 | |
1006 | XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); |
1007 | } |
1008 | |
1009 | XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); |
1010 | *mac_reg += MAC_MACA_INC; |
1011 | XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); |
1012 | *mac_reg += MAC_MACA_INC; |
1013 | } |
1014 | |
1015 | static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) |
1016 | { |
1017 | struct net_device *netdev = pdata->netdev; |
1018 | struct netdev_hw_addr *ha; |
1019 | unsigned int mac_reg; |
1020 | unsigned int addn_macs; |
1021 | |
1022 | mac_reg = MAC_MACA1HR; |
1023 | addn_macs = pdata->hw_feat.addn_mac; |
1024 | |
	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
1046 | } |
1047 | |
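/* Build the MAC hash table filter from the netdev unicast and
 * multicast lists.  Each address is hashed with CRC32 and the
 * high-order bits of the hash (scaled to the hash table size) select
 * a single bit in the table; received frames whose destination
 * address hashes to a set bit are accepted.
 */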
1048 | static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata) |
1049 | { |
1050 | struct net_device *netdev = pdata->netdev; |
1051 | struct netdev_hw_addr *ha; |
1052 | unsigned int hash_reg; |
1053 | unsigned int hash_table_shift, hash_table_count; |
1054 | u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE]; |
1055 | u32 crc; |
1056 | unsigned int i; |
1057 | |
1058 | hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); |
1059 | hash_table_count = pdata->hw_feat.hash_table_size / 32; |
1060 | memset(hash_table, 0, sizeof(hash_table)); |
1061 | |
1062 | /* Build the MAC Hash Table register values */ |
1063 | netdev_for_each_uc_addr(ha, netdev) { |
1064 | crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); |
1065 | crc >>= hash_table_shift; |
1066 | hash_table[crc >> 5] |= (1 << (crc & 0x1f)); |
1067 | } |
1068 | |
1069 | netdev_for_each_mc_addr(ha, netdev) { |
1070 | crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); |
1071 | crc >>= hash_table_shift; |
1072 | hash_table[crc >> 5] |= (1 << (crc & 0x1f)); |
1073 | } |
1074 | |
1075 | /* Set the MAC Hash Table registers */ |
1076 | hash_reg = MAC_HTR0; |
1077 | for (i = 0; i < hash_table_count; i++) { |
1078 | XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]); |
1079 | hash_reg += MAC_HTR_INC; |
1080 | } |
1081 | } |
1082 | |
1083 | static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) |
1084 | { |
1085 | if (pdata->hw_feat.hash_table_size) |
1086 | xgbe_set_mac_hash_table(pdata); |
1087 | else |
1088 | xgbe_set_mac_addn_addrs(pdata); |
1089 | |
1090 | return 0; |
1091 | } |
1092 | |
1093 | static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr) |
1094 | { |
1095 | unsigned int mac_addr_hi, mac_addr_lo; |
1096 | |
1097 | mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); |
1098 | mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | |
1099 | (addr[1] << 8) | (addr[0] << 0); |
1100 | |
1101 | XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); |
1102 | XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); |
1103 | |
1104 | return 0; |
1105 | } |
1106 | |
1107 | static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) |
1108 | { |
1109 | struct net_device *netdev = pdata->netdev; |
1110 | unsigned int pr_mode, am_mode; |
1111 | |
1112 | pr_mode = ((netdev->flags & IFF_PROMISC) != 0); |
1113 | am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); |
1114 | |
	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);
1117 | |
1118 | xgbe_add_mac_addresses(pdata); |
1119 | |
1120 | return 0; |
1121 | } |
1122 | |
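/* The MAC exposes up to 16 GPIO output pins through MAC_GPIOSR; the
 * output data bits live in the upper half of the register, so pin N
 * is driven via bit N + 16.
 */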
1123 | static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) |
1124 | { |
1125 | unsigned int reg; |
1126 | |
1127 | if (gpio > 15) |
1128 | return -EINVAL; |
1129 | |
1130 | reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); |
1131 | |
1132 | reg &= ~(1 << (gpio + 16)); |
1133 | XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); |
1134 | |
1135 | return 0; |
1136 | } |
1137 | |
1138 | static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) |
1139 | { |
1140 | unsigned int reg; |
1141 | |
1142 | if (gpio > 15) |
1143 | return -EINVAL; |
1144 | |
1145 | reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); |
1146 | |
1147 | reg |= (1 << (gpio + 16)); |
1148 | XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); |
1149 | |
1150 | return 0; |
1151 | } |
1152 | |
1153 | static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, |
1154 | int mmd_reg) |
1155 | { |
1156 | unsigned long flags; |
1157 | unsigned int mmd_address, index, offset; |
1158 | int mmd_data; |
1159 | |
1160 | if (mmd_reg & XGBE_ADDR_C45) |
1161 | mmd_address = mmd_reg & ~XGBE_ADDR_C45; |
1162 | else |
1163 | mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); |
1164 | |
1165 | /* The PCS registers are accessed using mmio. The underlying |
1166 | * management interface uses indirect addressing to access the MMD |
	 * register sets. This requires the PCS register to be accessed in two
1168 | * phases, an address phase and a data phase. |
1169 | * |
1170 | * The mmio interface is based on 16-bit offsets and values. All |
1171 | * register offsets must therefore be adjusted by left shifting the |
1172 | * offset 1 bit and reading 16 bits of data. |
1173 | */ |
1174 | mmd_address <<= 1; |
1175 | index = mmd_address & ~pdata->xpcs_window_mask; |
1176 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); |
1177 | |
1178 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1179 | XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); |
1180 | mmd_data = XPCS16_IOREAD(pdata, offset); |
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1182 | |
1183 | return mmd_data; |
1184 | } |
1185 | |
1186 | static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, |
1187 | int mmd_reg, int mmd_data) |
1188 | { |
1189 | unsigned long flags; |
1190 | unsigned int mmd_address, index, offset; |
1191 | |
1192 | if (mmd_reg & XGBE_ADDR_C45) |
1193 | mmd_address = mmd_reg & ~XGBE_ADDR_C45; |
1194 | else |
1195 | mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); |
1196 | |
1197 | /* The PCS registers are accessed using mmio. The underlying |
1198 | * management interface uses indirect addressing to access the MMD |
	 * register sets. This requires the PCS register to be accessed in two
1200 | * phases, an address phase and a data phase. |
1201 | * |
1202 | * The mmio interface is based on 16-bit offsets and values. All |
1203 | * register offsets must therefore be adjusted by left shifting the |
1204 | * offset 1 bit and writing 16 bits of data. |
1205 | */ |
1206 | mmd_address <<= 1; |
1207 | index = mmd_address & ~pdata->xpcs_window_mask; |
1208 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); |
1209 | |
1210 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1211 | XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); |
1212 | XPCS16_IOWRITE(pdata, offset, mmd_data); |
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1214 | } |
1215 | |
1216 | static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, |
1217 | int mmd_reg) |
1218 | { |
1219 | unsigned long flags; |
1220 | unsigned int mmd_address; |
1221 | int mmd_data; |
1222 | |
1223 | if (mmd_reg & XGBE_ADDR_C45) |
1224 | mmd_address = mmd_reg & ~XGBE_ADDR_C45; |
1225 | else |
1226 | mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); |
1227 | |
1228 | /* The PCS registers are accessed using mmio. The underlying APB3 |
1229 | * management interface uses indirect addressing to access the MMD |
	 * register sets. This requires the PCS register to be accessed in two
1231 | * phases, an address phase and a data phase. |
1232 | * |
1233 | * The mmio interface is based on 32-bit offsets and values. All |
1234 | * register offsets must therefore be adjusted by left shifting the |
1235 | * offset 2 bits and reading 32 bits of data. |
1236 | */ |
1237 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1238 | XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); |
1239 | mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); |
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1241 | |
1242 | return mmd_data; |
1243 | } |
1244 | |
1245 | static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, |
1246 | int mmd_reg, int mmd_data) |
1247 | { |
1248 | unsigned int mmd_address; |
1249 | unsigned long flags; |
1250 | |
1251 | if (mmd_reg & XGBE_ADDR_C45) |
1252 | mmd_address = mmd_reg & ~XGBE_ADDR_C45; |
1253 | else |
1254 | mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); |
1255 | |
1256 | /* The PCS registers are accessed using mmio. The underlying APB3 |
1257 | * management interface uses indirect addressing to access the MMD |
	 * register sets. This requires the PCS register to be accessed in two
1259 | * phases, an address phase and a data phase. |
1260 | * |
1261 | * The mmio interface is based on 32-bit offsets and values. All |
1262 | * register offsets must therefore be adjusted by left shifting the |
1263 | * offset 2 bits and writing 32 bits of data. |
1264 | */ |
1265 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1266 | XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); |
1267 | XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); |
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1269 | } |
1270 | |
1271 | static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, |
1272 | int mmd_reg) |
1273 | { |
1274 | switch (pdata->vdata->xpcs_access) { |
1275 | case XGBE_XPCS_ACCESS_V1: |
1276 | return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg); |
1277 | |
1278 | case XGBE_XPCS_ACCESS_V2: |
1279 | default: |
1280 | return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); |
1281 | } |
1282 | } |
1283 | |
1284 | static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, |
1285 | int mmd_reg, int mmd_data) |
1286 | { |
1287 | switch (pdata->vdata->xpcs_access) { |
1288 | case XGBE_XPCS_ACCESS_V1: |
1289 | return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data); |
1290 | |
1291 | case XGBE_XPCS_ACCESS_V2: |
1292 | default: |
1293 | return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); |
1294 | } |
1295 | } |
1296 | |
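/* Helpers to build the MAC_MDIOSCAR address word for an MDIO
 * transaction: clause 22 uses only the port and register addresses,
 * while clause 45 also carries the device (MMD) address.
 */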
1297 | static unsigned int xgbe_create_mdio_sca_c22(int port, int reg) |
1298 | { |
1299 | unsigned int mdio_sca; |
1300 | |
1301 | mdio_sca = 0; |
1302 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); |
1303 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); |
1304 | |
1305 | return mdio_sca; |
1306 | } |
1307 | |
1308 | static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg) |
1309 | { |
1310 | unsigned int mdio_sca; |
1311 | |
1312 | mdio_sca = 0; |
1313 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); |
1314 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); |
1315 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); |
1316 | |
1317 | return mdio_sca; |
1318 | } |
1319 | |
1320 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, |
1321 | unsigned int mdio_sca, u16 val) |
1322 | { |
1323 | unsigned int mdio_sccd; |
1324 | |
	reinit_completion(&pdata->mdio_complete);
1326 | |
1327 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
1328 | |
1329 | mdio_sccd = 0; |
1330 | XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); |
1331 | XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); |
1332 | XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); |
1333 | XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); |
1334 | |
	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio write operation timed out\n");
1337 | return -ETIMEDOUT; |
1338 | } |
1339 | |
1340 | return 0; |
1341 | } |
1342 | |
1343 | static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, |
1344 | int reg, u16 val) |
1345 | { |
1346 | unsigned int mdio_sca; |
1347 | |
	mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
1349 | |
1350 | return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); |
1351 | } |
1352 | |
1353 | static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, |
1354 | int devad, int reg, u16 val) |
1355 | { |
1356 | unsigned int mdio_sca; |
1357 | |
	mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
1359 | |
1360 | return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); |
1361 | } |
1362 | |
1363 | static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, |
1364 | unsigned int mdio_sca) |
1365 | { |
1366 | unsigned int mdio_sccd; |
1367 | |
	reinit_completion(&pdata->mdio_complete);
1369 | |
1370 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
1371 | |
1372 | mdio_sccd = 0; |
1373 | XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); |
1374 | XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); |
1375 | XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); |
1376 | |
	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio read operation timed out\n");
1379 | return -ETIMEDOUT; |
1380 | } |
1381 | |
1382 | return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); |
1383 | } |
1384 | |
1385 | static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, |
1386 | int reg) |
1387 | { |
1388 | unsigned int mdio_sca; |
1389 | |
	mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
1391 | |
1392 | return xgbe_read_ext_mii_regs(pdata, mdio_sca); |
1393 | } |
1394 | |
1395 | static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, |
1396 | int devad, int reg) |
1397 | { |
1398 | unsigned int mdio_sca; |
1399 | |
	mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
1401 | |
1402 | return xgbe_read_ext_mii_regs(pdata, mdio_sca); |
1403 | } |
1404 | |
1405 | static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, |
1406 | enum xgbe_mdio_mode mode) |
1407 | { |
1408 | unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); |
1409 | |
1410 | switch (mode) { |
1411 | case XGBE_MDIO_MODE_CL22: |
1412 | if (port > XGMAC_MAX_C22_PORT) |
1413 | return -EINVAL; |
1414 | reg_val |= (1 << port); |
1415 | break; |
1416 | case XGBE_MDIO_MODE_CL45: |
1417 | break; |
1418 | default: |
1419 | return -EINVAL; |
1420 | } |
1421 | |
1422 | XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); |
1423 | |
1424 | return 0; |
1425 | } |
1426 | |
1427 | static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) |
1428 | { |
1429 | return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); |
1430 | } |
1431 | |
1432 | static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) |
1433 | { |
1434 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); |
1435 | |
1436 | return 0; |
1437 | } |
1438 | |
1439 | static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) |
1440 | { |
1441 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); |
1442 | |
1443 | return 0; |
1444 | } |
1445 | |
1446 | static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) |
1447 | { |
1448 | struct xgbe_ring_desc *rdesc = rdata->rdesc; |
1449 | |
1450 | /* Reset the Tx descriptor |
1451 | * Set buffer 1 (lo) address to zero |
1452 | * Set buffer 1 (hi) address to zero |
1453 | * Reset all other control bits (IC, TTSE, B2L & B1L) |
1454 | * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) |
1455 | */ |
1456 | rdesc->desc0 = 0; |
1457 | rdesc->desc1 = 0; |
1458 | rdesc->desc2 = 0; |
1459 | rdesc->desc3 = 0; |
1460 | |
1461 | /* Make sure ownership is written to the descriptor */ |
1462 | dma_wmb(); |
1463 | } |
1464 | |
1465 | static void xgbe_tx_desc_init(struct xgbe_channel *channel) |
1466 | { |
1467 | struct xgbe_ring *ring = channel->tx_ring; |
1468 | struct xgbe_ring_data *rdata; |
1469 | int i; |
1470 | int start_index = ring->cur; |
1471 | |
	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
1475 | for (i = 0; i < ring->rdesc_count; i++) { |
1476 | rdata = XGBE_GET_DESC_DATA(ring, i); |
1477 | |
1478 | /* Initialize Tx descriptor */ |
1479 | xgbe_tx_desc_reset(rdata); |
1480 | } |
1481 | |
1482 | /* Update the total number of Tx descriptors */ |
1483 | XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); |
1484 | |
1485 | /* Update the starting address of descriptor ring */ |
1486 | rdata = XGBE_GET_DESC_DATA(ring, start_index); |
1487 | XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, |
1488 | upper_32_bits(rdata->rdesc_dma)); |
1489 | XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, |
1490 | lower_32_bits(rdata->rdesc_dma)); |
1491 | |
	DBGPR("<--tx_desc_init\n");
1493 | } |
1494 | |
1495 | static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, |
1496 | struct xgbe_ring_data *rdata, unsigned int index) |
1497 | { |
1498 | struct xgbe_ring_desc *rdesc = rdata->rdesc; |
1499 | unsigned int rx_usecs = pdata->rx_usecs; |
1500 | unsigned int rx_frames = pdata->rx_frames; |
1501 | unsigned int inte; |
1502 | dma_addr_t hdr_dma, buf_dma; |
1503 | |
1504 | if (!rx_usecs && !rx_frames) { |
1505 | /* No coalescing, interrupt for every descriptor */ |
1506 | inte = 1; |
1507 | } else { |
1508 | /* Set interrupt based on Rx frame coalescing setting */ |
1509 | if (rx_frames && !((index + 1) % rx_frames)) |
1510 | inte = 1; |
1511 | else |
1512 | inte = 0; |
1513 | } |
1514 | |
1515 | /* Reset the Rx descriptor |
1516 | * Set buffer 1 (lo) address to header dma address (lo) |
1517 | * Set buffer 1 (hi) address to header dma address (hi) |
1518 | * Set buffer 2 (lo) address to buffer dma address (lo) |
1519 | * Set buffer 2 (hi) address to buffer dma address (hi) and |
1520 | * set control bits OWN and INTE |
1521 | */ |
1522 | hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; |
1523 | buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; |
1524 | rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); |
1525 | rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); |
1526 | rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); |
1527 | rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); |
1528 | |
1529 | XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); |
1530 | |
1531 | /* Since the Rx DMA engine is likely running, make sure everything |
1532 | * is written to the descriptor(s) before setting the OWN bit |
1533 | * for the descriptor |
1534 | */ |
1535 | dma_wmb(); |
1536 | |
1537 | XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); |
1538 | |
1539 | /* Make sure ownership is written to the descriptor */ |
1540 | dma_wmb(); |
1541 | } |
1542 | |
1543 | static void xgbe_rx_desc_init(struct xgbe_channel *channel) |
1544 | { |
1545 | struct xgbe_prv_data *pdata = channel->pdata; |
1546 | struct xgbe_ring *ring = channel->rx_ring; |
1547 | struct xgbe_ring_data *rdata; |
1548 | unsigned int start_index = ring->cur; |
1549 | unsigned int i; |
1550 | |
	DBGPR("-->rx_desc_init\n");
1552 | |
1553 | /* Initialize all descriptors */ |
1554 | for (i = 0; i < ring->rdesc_count; i++) { |
1555 | rdata = XGBE_GET_DESC_DATA(ring, i); |
1556 | |
1557 | /* Initialize Rx descriptor */ |
		xgbe_rx_desc_reset(pdata, rdata, i);
1559 | } |
1560 | |
1561 | /* Update the total number of Rx descriptors */ |
1562 | XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); |
1563 | |
1564 | /* Update the starting address of descriptor ring */ |
1565 | rdata = XGBE_GET_DESC_DATA(ring, start_index); |
1566 | XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, |
1567 | upper_32_bits(rdata->rdesc_dma)); |
1568 | XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, |
1569 | lower_32_bits(rdata->rdesc_dma)); |
1570 | |
1571 | /* Update the Rx Descriptor Tail Pointer */ |
1572 | rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); |
1573 | XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, |
1574 | lower_32_bits(rdata->rdesc_dma)); |
1575 | |
	DBGPR("<--rx_desc_init\n");
1577 | } |
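/* Update the timestamp addend register used for fine correction of
 * the PTP clock.  The addend is accumulated every reference clock
 * cycle and the sub-second counter advances on accumulator overflow,
 * so adjusting the addend trims the effective clock frequency.
 */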
1578 | |
1579 | static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, |
1580 | unsigned int addend) |
1581 | { |
1582 | unsigned int count = 10000; |
1583 | |
1584 | /* Set the addend register value and tell the device */ |
1585 | XGMAC_IOWRITE(pdata, MAC_TSAR, addend); |
1586 | XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); |
1587 | |
1588 | /* Wait for addend update to complete */ |
1589 | while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) |
1590 | udelay(5); |
1591 | |
1592 | if (!count) |
		netdev_err(pdata->netdev,
			   "timed out updating timestamp addend register\n");
1595 | } |
1596 | |
1597 | static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, |
1598 | unsigned int nsec) |
1599 | { |
1600 | unsigned int count = 10000; |
1601 | |
1602 | /* Set the time values and tell the device */ |
1603 | XGMAC_IOWRITE(pdata, MAC_STSUR, sec); |
1604 | XGMAC_IOWRITE(pdata, MAC_STNUR, nsec); |
1605 | XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); |
1606 | |
1607 | /* Wait for time update to complete */ |
1608 | while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) |
1609 | udelay(5); |
1610 | |
1611 | if (!count) |
		netdev_err(pdata->netdev, "timed out initializing timestamp\n");
1613 | } |
1614 | |
1615 | static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata) |
1616 | { |
1617 | u64 nsec; |
1618 | |
1619 | nsec = XGMAC_IOREAD(pdata, MAC_STSR); |
1620 | nsec *= NSEC_PER_SEC; |
1621 | nsec += XGMAC_IOREAD(pdata, MAC_STNR); |
1622 | |
1623 | return nsec; |
1624 | } |
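/* Read the latched Tx timestamp snapshot.  Depending on the hardware
 * version the nanosecond and second registers must be read in a
 * specific order (the tx_tstamp_workaround case); a set TXTSSTSMIS
 * bit indicates the snapshot was missed and no timestamp is valid.
 */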
1625 | |
1626 | static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata) |
1627 | { |
1628 | unsigned int tx_snr, tx_ssr; |
1629 | u64 nsec; |
1630 | |
1631 | if (pdata->vdata->tx_tstamp_workaround) { |
1632 | tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); |
1633 | tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR); |
1634 | } else { |
1635 | tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR); |
1636 | tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); |
1637 | } |
1638 | |
1639 | if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) |
1640 | return 0; |
1641 | |
1642 | nsec = tx_ssr; |
1643 | nsec *= NSEC_PER_SEC; |
1644 | nsec += tx_snr; |
1645 | |
1646 | return nsec; |
1647 | } |
1648 | |
1649 | static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet, |
1650 | struct xgbe_ring_desc *rdesc) |
1651 | { |
1652 | u64 nsec; |
1653 | |
1654 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && |
1655 | !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { |
1656 | nsec = le32_to_cpu(rdesc->desc1); |
1657 | nsec <<= 32; |
1658 | nsec |= le32_to_cpu(rdesc->desc0); |
1659 | if (nsec != 0xffffffffffffffffULL) { |
1660 | packet->rx_tstamp = nsec; |
1661 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
1662 | RX_TSTAMP, 1); |
1663 | } |
1664 | } |
1665 | } |
1666 | |
1667 | static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, |
1668 | unsigned int mac_tscr) |
1669 | { |
1670 | /* Set one nano-second accuracy */ |
1671 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); |
1672 | |
1673 | /* Set fine timestamp update */ |
1674 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); |
1675 | |
1676 | /* Overwrite earlier timestamps */ |
1677 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); |
1678 | |
1679 | XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); |
1680 | |
1681 | /* Exit if timestamping is not enabled */ |
1682 | if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) |
1683 | return 0; |
1684 | |
1685 | /* Initialize time registers */ |
1686 | XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC); |
1687 | XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC); |
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);
1690 | |
1691 | /* Initialize the timecounter */ |
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));
1694 | |
1695 | return 0; |
1696 | } |
1697 | |
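/* Kick the Tx DMA engine by writing the channel tail pointer with the
 * address of the first unused descriptor; the engine processes owned
 * descriptors up to that address, so the write acts as the doorbell for
 * everything queued so far.  When Tx coalescing is active, the timer armed
 * here provides a fallback interrupt for descriptors queued without the
 * IC bit.
 */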
1698 | static void xgbe_tx_start_xmit(struct xgbe_channel *channel, |
1699 | struct xgbe_ring *ring) |
1700 | { |
1701 | struct xgbe_prv_data *pdata = channel->pdata; |
1702 | struct xgbe_ring_data *rdata; |
1703 | |
1704 | /* Make sure everything is written before the register write */ |
1705 | wmb(); |
1706 | |
1707 | /* Issue a poll command to Tx DMA by writing address |
1708 | * of next immediate free descriptor */ |
1709 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
1710 | XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, |
1711 | lower_32_bits(rdata->rdesc_dma)); |
1712 | |
1713 | /* Start the Tx timer */ |
1714 | if (pdata->tx_usecs && !channel->tx_timer_active) { |
1715 | channel->tx_timer_active = 1; |
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
1718 | } |
1719 | |
1720 | ring->tx.xmit_more = 0; |
1721 | } |
1722 | |
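/* Build the descriptor chain for one packet: an optional context
 * descriptor (when a new TSO MSS or VLAN tag must be programmed), a first
 * descriptor covering the header/start of data, and one descriptor per
 * remaining DMA buffer.  The OWN bit of the first descriptor is written
 * last, behind dma_wmb(), so the hardware never sees a partially
 * initialized chain.
 */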
1723 | static void xgbe_dev_xmit(struct xgbe_channel *channel) |
1724 | { |
1725 | struct xgbe_prv_data *pdata = channel->pdata; |
1726 | struct xgbe_ring *ring = channel->tx_ring; |
1727 | struct xgbe_ring_data *rdata; |
1728 | struct xgbe_ring_desc *rdesc; |
1729 | struct xgbe_packet_data *packet = &ring->packet_data; |
1730 | unsigned int tx_packets, tx_bytes; |
1731 | unsigned int csum, tso, vlan, vxlan; |
1732 | unsigned int tso_context, vlan_context; |
1733 | unsigned int tx_set_ic; |
1734 | int start_index = ring->cur; |
1735 | int cur_index = ring->cur; |
1736 | int i; |
1737 | |
	DBGPR("-->xgbe_dev_xmit\n");
1739 | |
1740 | tx_packets = packet->tx_packets; |
1741 | tx_bytes = packet->tx_bytes; |
1742 | |
1743 | csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1744 | CSUM_ENABLE); |
1745 | tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1746 | TSO_ENABLE); |
1747 | vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1748 | VLAN_CTAG); |
1749 | vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1750 | VXLAN); |
1751 | |
1752 | if (tso && (packet->mss != ring->tx.cur_mss)) |
1753 | tso_context = 1; |
1754 | else |
1755 | tso_context = 0; |
1756 | |
1757 | if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) |
1758 | vlan_context = 1; |
1759 | else |
1760 | vlan_context = 0; |
1761 | |
1762 | /* Determine if an interrupt should be generated for this Tx: |
1763 | * Interrupt: |
1764 | * - Tx frame count exceeds the frame count setting |
1765 | * - Addition of Tx frame count to the frame count since the |
1766 | * last interrupt was set exceeds the frame count setting |
1767 | * No interrupt: |
1768 | * - No frame count setting specified (ethtool -C ethX tx-frames 0) |
1769 | * - Addition of Tx frame count to the frame count since the |
1770 | * last interrupt was set does not exceed the frame count setting |
1771 | */ |
1772 | ring->coalesce_count += tx_packets; |
1773 | if (!pdata->tx_frames) |
1774 | tx_set_ic = 0; |
1775 | else if (tx_packets > pdata->tx_frames) |
1776 | tx_set_ic = 1; |
1777 | else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) |
1778 | tx_set_ic = 1; |
1779 | else |
1780 | tx_set_ic = 0; |
1781 | |
1782 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
1783 | rdesc = rdata->rdesc; |
1784 | |
1785 | /* Create a context descriptor if this is a TSO packet */ |
1786 | if (tso_context || vlan_context) { |
1787 | if (tso_context) { |
1788 | netif_dbg(pdata, tx_queued, pdata->netdev, |
				  "TSO context descriptor, mss=%u\n",
1790 | packet->mss); |
1791 | |
1792 | /* Set the MSS size */ |
1793 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, |
1794 | MSS, packet->mss); |
1795 | |
1796 | /* Mark it as a CONTEXT descriptor */ |
1797 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, |
1798 | CTXT, 1); |
1799 | |
1800 | /* Indicate this descriptor contains the MSS */ |
1801 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, |
1802 | TCMSSV, 1); |
1803 | |
1804 | ring->tx.cur_mss = packet->mss; |
1805 | } |
1806 | |
1807 | if (vlan_context) { |
1808 | netif_dbg(pdata, tx_queued, pdata->netdev, |
				  "VLAN context descriptor, ctag=%u\n",
1810 | packet->vlan_ctag); |
1811 | |
1812 | /* Mark it as a CONTEXT descriptor */ |
1813 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, |
1814 | CTXT, 1); |
1815 | |
1816 | /* Set the VLAN tag */ |
1817 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, |
1818 | VT, packet->vlan_ctag); |
1819 | |
1820 | /* Indicate this descriptor contains the VLAN tag */ |
1821 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, |
1822 | VLTV, 1); |
1823 | |
1824 | ring->tx.cur_vlan_ctag = packet->vlan_ctag; |
1825 | } |
1826 | |
1827 | cur_index++; |
1828 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
1829 | rdesc = rdata->rdesc; |
1830 | } |
1831 | |
1832 | /* Update buffer address (for TSO this is the header) */ |
1833 | rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); |
1834 | rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); |
1835 | |
1836 | /* Update the buffer length */ |
1837 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, |
1838 | rdata->skb_dma_len); |
1839 | |
1840 | /* VLAN tag insertion check */ |
1841 | if (vlan) |
1842 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, |
1843 | TX_NORMAL_DESC2_VLAN_INSERT); |
1844 | |
1845 | /* Timestamp enablement check */ |
1846 | if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) |
1847 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); |
1848 | |
1849 | /* Mark it as First Descriptor */ |
1850 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); |
1851 | |
1852 | /* Mark it as a NORMAL descriptor */ |
1853 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); |
1854 | |
1855 | /* Set OWN bit if not the first descriptor */ |
1856 | if (cur_index != start_index) |
1857 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); |
1858 | |
1859 | if (tso) { |
1860 | /* Enable TSO */ |
1861 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); |
1862 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, |
1863 | packet->tcp_payload_len); |
1864 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, |
1865 | packet->tcp_header_len / 4); |
1866 | |
1867 | pdata->ext_stats.tx_tso_packets += tx_packets; |
1868 | } else { |
1869 | /* Enable CRC and Pad Insertion */ |
1870 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); |
1871 | |
1872 | /* Enable HW CSUM */ |
1873 | if (csum) |
1874 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, |
1875 | CIC, 0x3); |
1876 | |
1877 | /* Set the total length to be transmitted */ |
1878 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, |
1879 | packet->length); |
1880 | } |
1881 | |
1882 | if (vxlan) { |
1883 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, |
1884 | TX_NORMAL_DESC3_VXLAN_PACKET); |
1885 | |
1886 | pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; |
1887 | } |
1888 | |
1889 | for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { |
1890 | cur_index++; |
1891 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
1892 | rdesc = rdata->rdesc; |
1893 | |
1894 | /* Update buffer address */ |
1895 | rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); |
1896 | rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); |
1897 | |
1898 | /* Update the buffer length */ |
1899 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, |
1900 | rdata->skb_dma_len); |
1901 | |
1902 | /* Set OWN bit */ |
1903 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); |
1904 | |
1905 | /* Mark it as NORMAL descriptor */ |
1906 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); |
1907 | |
1908 | /* Enable HW CSUM */ |
1909 | if (csum) |
1910 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, |
1911 | CIC, 0x3); |
1912 | } |
1913 | |
1914 | /* Set LAST bit for the last descriptor */ |
1915 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); |
1916 | |
1917 | /* Set IC bit based on Tx coalescing settings */ |
1918 | if (tx_set_ic) |
1919 | XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); |
1920 | |
1921 | /* Save the Tx info to report back during cleanup */ |
1922 | rdata->tx.packets = tx_packets; |
1923 | rdata->tx.bytes = tx_bytes; |
1924 | |
1925 | pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; |
1926 | pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; |
1927 | |
1928 | /* In case the Tx DMA engine is running, make sure everything |
1929 | * is written to the descriptor(s) before setting the OWN bit |
1930 | * for the first descriptor |
1931 | */ |
1932 | dma_wmb(); |
1933 | |
1934 | /* Set OWN bit for the first descriptor */ |
1935 | rdata = XGBE_GET_DESC_DATA(ring, start_index); |
1936 | rdesc = rdata->rdesc; |
1937 | XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); |
1938 | |
1939 | if (netif_msg_tx_queued(pdata)) |
1940 | xgbe_dump_tx_desc(pdata, ring, start_index, |
1941 | packet->rdesc_count, 1); |
1942 | |
1943 | /* Make sure ownership is written to the descriptor */ |
1944 | smp_wmb(); |
1945 | |
1946 | ring->cur = cur_index + 1; |
	if (!netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
1950 | xgbe_tx_start_xmit(channel, ring); |
1951 | else |
1952 | ring->tx.xmit_more = 1; |
1953 | |
	DBGPR(" %s: descriptors %u to %u written\n",
1955 | channel->name, start_index & (ring->rdesc_count - 1), |
1956 | (ring->cur - 1) & (ring->rdesc_count - 1)); |
1957 | |
	DBGPR("<--xgbe_dev_xmit\n");
1959 | } |
1960 | |
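/* Parse the Rx descriptor at ring->cur.  Returns 1 while the descriptor is
 * still owned by the hardware and 0 once its fields have been translated
 * into packet attributes: context/timestamp data, split-header length,
 * RSS hash, VLAN tag, checksum status and error indications.
 */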
1961 | static int xgbe_dev_read(struct xgbe_channel *channel) |
1962 | { |
1963 | struct xgbe_prv_data *pdata = channel->pdata; |
1964 | struct xgbe_ring *ring = channel->rx_ring; |
1965 | struct xgbe_ring_data *rdata; |
1966 | struct xgbe_ring_desc *rdesc; |
1967 | struct xgbe_packet_data *packet = &ring->packet_data; |
1968 | struct net_device *netdev = pdata->netdev; |
1969 | unsigned int err, etlt, l34t; |
1970 | |
	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1972 | |
1973 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
1974 | rdesc = rdata->rdesc; |
1975 | |
1976 | /* Check for data availability */ |
1977 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) |
1978 | return 1; |
1979 | |
1980 | /* Make sure descriptor fields are read after reading the OWN bit */ |
1981 | dma_rmb(); |
1982 | |
1983 | if (netif_msg_rx_status(pdata)) |
1984 | xgbe_dump_rx_desc(pdata, ring, ring->cur); |
1985 | |
1986 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { |
1987 | /* Timestamp Context Descriptor */ |
1988 | xgbe_get_rx_tstamp(packet, rdesc); |
1989 | |
1990 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
1991 | CONTEXT, 1); |
1992 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
1993 | CONTEXT_NEXT, 0); |
1994 | return 0; |
1995 | } |
1996 | |
1997 | /* Normal Descriptor, be sure Context Descriptor bit is off */ |
1998 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); |
1999 | |
2000 | /* Indicate if a Context Descriptor is next */ |
2001 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) |
2002 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2003 | CONTEXT_NEXT, 1); |
2004 | |
2005 | /* Get the header length */ |
2006 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { |
2007 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2008 | FIRST, 1); |
2009 | rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, |
2010 | RX_NORMAL_DESC2, HL); |
2011 | if (rdata->rx.hdr_len) |
2012 | pdata->ext_stats.rx_split_header_packets++; |
2013 | } else { |
2014 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2015 | FIRST, 0); |
2016 | } |
2017 | |
2018 | /* Get the RSS hash */ |
2019 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { |
2020 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2021 | RSS_HASH, 1); |
2022 | |
2023 | packet->rss_hash = le32_to_cpu(rdesc->desc1); |
2024 | |
2025 | l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); |
2026 | switch (l34t) { |
2027 | case RX_DESC3_L34T_IPV4_TCP: |
2028 | case RX_DESC3_L34T_IPV4_UDP: |
2029 | case RX_DESC3_L34T_IPV6_TCP: |
2030 | case RX_DESC3_L34T_IPV6_UDP: |
2031 | packet->rss_hash_type = PKT_HASH_TYPE_L4; |
2032 | break; |
2033 | default: |
2034 | packet->rss_hash_type = PKT_HASH_TYPE_L3; |
2035 | } |
2036 | } |
2037 | |
2038 | /* Not all the data has been transferred for this packet */ |
2039 | if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) |
2040 | return 0; |
2041 | |
2042 | /* This is the last of the data for this packet */ |
2043 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2044 | LAST, 1); |
2045 | |
2046 | /* Get the packet length */ |
2047 | rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); |
2048 | |
2049 | /* Set checksum done indicator as appropriate */ |
2050 | if (netdev->features & NETIF_F_RXCSUM) { |
2051 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2052 | CSUM_DONE, 1); |
2053 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2054 | TNPCSUM_DONE, 1); |
2055 | } |
2056 | |
2057 | /* Set the tunneled packet indicator */ |
2058 | if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { |
2059 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2060 | TNP, 1); |
2061 | pdata->ext_stats.rx_vxlan_packets++; |
2062 | |
2063 | l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); |
2064 | switch (l34t) { |
2065 | case RX_DESC3_L34T_IPV4_UNKNOWN: |
2066 | case RX_DESC3_L34T_IPV6_UNKNOWN: |
2067 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2068 | TNPCSUM_DONE, 0); |
2069 | break; |
2070 | } |
2071 | } |
2072 | |
2073 | /* Check for errors (only valid in last descriptor) */ |
2074 | err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); |
2075 | etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); |
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
2077 | |
2078 | if (!err || !etlt) { |
2079 | /* No error if err is 0 or etlt is 0 */ |
2080 | if ((etlt == 0x09) && |
2081 | (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
2082 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2083 | VLAN_CTAG, 1); |
2084 | packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, |
2085 | RX_NORMAL_DESC0, |
2086 | OVT); |
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
2088 | packet->vlan_ctag); |
2089 | } |
2090 | } else { |
2091 | unsigned int tnp = XGMAC_GET_BITS(packet->attributes, |
2092 | RX_PACKET_ATTRIBUTES, TNP); |
2093 | |
2094 | if ((etlt == 0x05) || (etlt == 0x06)) { |
2095 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2096 | CSUM_DONE, 0); |
2097 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2098 | TNPCSUM_DONE, 0); |
2099 | pdata->ext_stats.rx_csum_errors++; |
2100 | } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { |
2101 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2102 | CSUM_DONE, 0); |
2103 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2104 | TNPCSUM_DONE, 0); |
2105 | pdata->ext_stats.rx_vxlan_csum_errors++; |
2106 | } else { |
2107 | XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, |
2108 | FRAME, 1); |
2109 | } |
2110 | } |
2111 | |
2112 | pdata->ext_stats.rxq_packets[channel->queue_index]++; |
2113 | pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; |
2114 | |
	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
2116 | ring->cur & (ring->rdesc_count - 1), ring->cur); |
2117 | |
2118 | return 0; |
2119 | } |
2120 | |
2121 | static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) |
2122 | { |
2123 | /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ |
2124 | return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); |
2125 | } |
2126 | |
2127 | static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) |
2128 | { |
2129 | /* Rx and Tx share LD bit, so check TDES3.LD bit */ |
2130 | return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); |
2131 | } |
2132 | |
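/* The enabled interrupt bits are shadowed in channel->curr_ier so that
 * DMA_CH_IER can always be rewritten in full.  XGMAC_INT_DMA_ALL saves and
 * restores the complete mask, letting callers mask everything around a
 * critical section and then restore exactly the set that was enabled.
 */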
2133 | static int xgbe_enable_int(struct xgbe_channel *channel, |
2134 | enum xgbe_int int_id) |
2135 | { |
2136 | switch (int_id) { |
2137 | case XGMAC_INT_DMA_CH_SR_TI: |
2138 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); |
2139 | break; |
2140 | case XGMAC_INT_DMA_CH_SR_TPS: |
2141 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); |
2142 | break; |
2143 | case XGMAC_INT_DMA_CH_SR_TBU: |
2144 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); |
2145 | break; |
2146 | case XGMAC_INT_DMA_CH_SR_RI: |
2147 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); |
2148 | break; |
2149 | case XGMAC_INT_DMA_CH_SR_RBU: |
2150 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); |
2151 | break; |
2152 | case XGMAC_INT_DMA_CH_SR_RPS: |
2153 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); |
2154 | break; |
2155 | case XGMAC_INT_DMA_CH_SR_TI_RI: |
2156 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); |
2157 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); |
2158 | break; |
2159 | case XGMAC_INT_DMA_CH_SR_FBE: |
2160 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); |
2161 | break; |
2162 | case XGMAC_INT_DMA_ALL: |
2163 | channel->curr_ier |= channel->saved_ier; |
2164 | break; |
2165 | default: |
2166 | return -1; |
2167 | } |
2168 | |
2169 | XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); |
2170 | |
2171 | return 0; |
2172 | } |
2173 | |
2174 | static int xgbe_disable_int(struct xgbe_channel *channel, |
2175 | enum xgbe_int int_id) |
2176 | { |
2177 | switch (int_id) { |
2178 | case XGMAC_INT_DMA_CH_SR_TI: |
2179 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); |
2180 | break; |
2181 | case XGMAC_INT_DMA_CH_SR_TPS: |
2182 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); |
2183 | break; |
2184 | case XGMAC_INT_DMA_CH_SR_TBU: |
2185 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); |
2186 | break; |
2187 | case XGMAC_INT_DMA_CH_SR_RI: |
2188 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); |
2189 | break; |
2190 | case XGMAC_INT_DMA_CH_SR_RBU: |
2191 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); |
2192 | break; |
2193 | case XGMAC_INT_DMA_CH_SR_RPS: |
2194 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); |
2195 | break; |
2196 | case XGMAC_INT_DMA_CH_SR_TI_RI: |
2197 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); |
2198 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); |
2199 | break; |
2200 | case XGMAC_INT_DMA_CH_SR_FBE: |
2201 | XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); |
2202 | break; |
2203 | case XGMAC_INT_DMA_ALL: |
2204 | channel->saved_ier = channel->curr_ier; |
2205 | channel->curr_ier = 0; |
2206 | break; |
2207 | default: |
2208 | return -1; |
2209 | } |
2210 | |
2211 | XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); |
2212 | |
2213 | return 0; |
2214 | } |
2215 | |
2216 | static int __xgbe_exit(struct xgbe_prv_data *pdata) |
2217 | { |
2218 | unsigned int count = 2000; |
2219 | |
	DBGPR("-->xgbe_exit\n");
2221 | |
2222 | /* Issue a software reset */ |
2223 | XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); |
	usleep_range(10, 15);
2225 | |
2226 | /* Poll Until Poll Condition */ |
2227 | while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) |
		usleep_range(500, 600);
2229 | |
2230 | if (!count) |
2231 | return -EBUSY; |
2232 | |
	DBGPR("<--xgbe_exit\n");
2234 | |
2235 | return 0; |
2236 | } |
2237 | |
2238 | static int xgbe_exit(struct xgbe_prv_data *pdata) |
2239 | { |
2240 | int ret; |
2241 | |
2242 | /* To guard against possible incorrectly generated interrupts, |
2243 | * issue the software reset twice. |
2244 | */ |
2245 | ret = __xgbe_exit(pdata); |
2246 | if (ret) |
2247 | return ret; |
2248 | |
2249 | return __xgbe_exit(pdata); |
2250 | } |
2251 | |
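/* Flush the MTL Tx queues by setting FTQ and polling until the hardware
 * clears it again.  The flush is skipped on versions of the IP below 0x21,
 * where it is evidently unsafe or unnecessary.
 */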
2252 | static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) |
2253 | { |
2254 | unsigned int i, count; |
2255 | |
2256 | if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) |
2257 | return 0; |
2258 | |
2259 | for (i = 0; i < pdata->tx_q_count; i++) |
2260 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); |
2261 | |
2262 | /* Poll Until Poll Condition */ |
2263 | for (i = 0; i < pdata->tx_q_count; i++) { |
2264 | count = 2000; |
2265 | while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, |
2266 | MTL_Q_TQOMR, FTQ)) |
			usleep_range(500, 600);
2268 | |
2269 | if (!count) |
2270 | return -EBUSY; |
2271 | } |
2272 | |
2273 | return 0; |
2274 | } |
2275 | |
2276 | static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) |
2277 | { |
2278 | unsigned int sbmr; |
2279 | |
2280 | sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); |
2281 | |
2282 | /* Set enhanced addressing mode */ |
2283 | XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); |
2284 | |
2285 | /* Set the System Bus mode */ |
2286 | XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); |
2287 | XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); |
2288 | XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); |
2289 | XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); |
2290 | XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); |
2291 | |
2292 | XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); |
2293 | |
2294 | /* Set descriptor fetching threshold */ |
2295 | if (pdata->vdata->tx_desc_prefetch) |
2296 | XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, |
2297 | pdata->vdata->tx_desc_prefetch); |
2298 | |
2299 | if (pdata->vdata->rx_desc_prefetch) |
2300 | XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, |
2301 | pdata->vdata->rx_desc_prefetch); |
2302 | } |
2303 | |
2304 | static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) |
2305 | { |
2306 | XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); |
2307 | XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); |
2308 | if (pdata->awarcr) |
2309 | XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); |
2310 | } |
2311 | |
2312 | static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) |
2313 | { |
2314 | unsigned int i; |
2315 | |
2316 | /* Set Tx to weighted round robin scheduling algorithm */ |
2317 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); |
2318 | |
2319 | /* Set Tx traffic classes to use WRR algorithm with equal weights */ |
2320 | for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { |
2321 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, |
2322 | MTL_TSA_ETS); |
2323 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); |
2324 | } |
2325 | |
2326 | /* Set Rx to strict priority algorithm */ |
2327 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); |
2328 | } |
2329 | |
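/* Compute the flow control activate (RFA) and deactivate (RFD) thresholds
 * for one Rx queue.  The register fields encode fifo fullness in fixed
 * units, which XGMAC_FLOW_CONTROL_VALUE() maps byte counts onto; the
 * small-fifo cases below instead use hard-coded encodings ("Full - 1024
 * bytes" and so on).  For PFC queues, the activation point is the PFC
 * delay allowance calculated in xgbe_calculate_dcb_fifo().
 */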
2330 | static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, |
2331 | unsigned int queue, |
2332 | unsigned int q_fifo_size) |
2333 | { |
2334 | unsigned int frame_fifo_size; |
2335 | unsigned int rfa, rfd; |
2336 | |
2337 | frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); |
2338 | |
2339 | if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) { |
2340 | /* PFC is active for this queue */ |
2341 | rfa = pdata->pfc_rfa; |
2342 | rfd = rfa + frame_fifo_size; |
2343 | if (rfd > XGMAC_FLOW_CONTROL_MAX) |
2344 | rfd = XGMAC_FLOW_CONTROL_MAX; |
2345 | if (rfa >= XGMAC_FLOW_CONTROL_MAX) |
2346 | rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT; |
2347 | } else { |
2348 | /* This path deals with just maximum frame sizes which are |
2349 | * limited to a jumbo frame of 9,000 (plus headers, etc.) |
2350 | * so we can never exceed the maximum allowable RFA/RFD |
2351 | * values. |
2352 | */ |
2353 | if (q_fifo_size <= 2048) { |
2354 | /* rx_rfd to zero to signal no flow control */ |
2355 | pdata->rx_rfa[queue] = 0; |
2356 | pdata->rx_rfd[queue] = 0; |
2357 | return; |
2358 | } |
2359 | |
2360 | if (q_fifo_size <= 4096) { |
2361 | /* Between 2048 and 4096 */ |
2362 | pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ |
2363 | pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ |
2364 | return; |
2365 | } |
2366 | |
2367 | if (q_fifo_size <= frame_fifo_size) { |
2368 | /* Between 4096 and max-frame */ |
2369 | pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ |
2370 | pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ |
2371 | return; |
2372 | } |
2373 | |
2374 | if (q_fifo_size <= (frame_fifo_size * 3)) { |
2375 | /* Between max-frame and 3 max-frames, |
2376 | * trigger if we get just over a frame of data and |
2377 | * resume when we have just under half a frame left. |
2378 | */ |
2379 | rfa = q_fifo_size - frame_fifo_size; |
2380 | rfd = rfa + (frame_fifo_size / 2); |
2381 | } else { |
2382 | /* Above 3 max-frames - trigger when just over |
2383 | * 2 frames of space available |
2384 | */ |
2385 | rfa = frame_fifo_size * 2; |
2386 | rfa += XGMAC_FLOW_CONTROL_UNIT; |
2387 | rfd = rfa + frame_fifo_size; |
2388 | } |
2389 | } |
2390 | |
2391 | pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); |
2392 | pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); |
2393 | } |
2394 | |
2395 | static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, |
2396 | unsigned int *fifo) |
2397 | { |
2398 | unsigned int q_fifo_size; |
2399 | unsigned int i; |
2400 | |
2401 | for (i = 0; i < pdata->rx_q_count; i++) { |
2402 | q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; |
2403 | |
		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
2405 | } |
2406 | } |
2407 | |
2408 | static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) |
2409 | { |
2410 | unsigned int i; |
2411 | |
2412 | for (i = 0; i < pdata->rx_q_count; i++) { |
2413 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, |
2414 | pdata->rx_rfa[i]); |
2415 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, |
2416 | pdata->rx_rfd[i]); |
2417 | } |
2418 | } |
2419 | |
2420 | static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) |
2421 | { |
2422 | /* The configured value may not be the actual amount of fifo RAM */ |
2423 | return min_t(unsigned int, pdata->tx_max_fifo_size, |
2424 | pdata->hw_feat.tx_fifo_size); |
2425 | } |
2426 | |
2427 | static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) |
2428 | { |
2429 | /* The configured value may not be the actual amount of fifo RAM */ |
2430 | return min_t(unsigned int, pdata->rx_max_fifo_size, |
2431 | pdata->hw_feat.rx_fifo_size); |
2432 | } |
2433 | |
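/* Split the fifo evenly across queues and convert the per-queue byte count
 * into the register encoding, where 0 represents a single allocation unit.
 * For example, assuming XGMAC_FIFO_UNIT is 256 bytes, a 65536-byte fifo
 * shared by four queues yields q_fifo_size = 16384 and a programmed value
 * of 16384 / 256 - 1 = 63 per queue.
 */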
2434 | static void xgbe_calculate_equal_fifo(unsigned int fifo_size, |
2435 | unsigned int queue_count, |
2436 | unsigned int *fifo) |
2437 | { |
2438 | unsigned int q_fifo_size; |
2439 | unsigned int p_fifo; |
2440 | unsigned int i; |
2441 | |
2442 | q_fifo_size = fifo_size / queue_count; |
2443 | |
2444 | /* Calculate the fifo setting by dividing the queue's fifo size |
2445 | * by the fifo allocation increment (with 0 representing the |
2446 | * base allocation increment so decrement the result by 1). |
2447 | */ |
2448 | p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; |
2449 | if (p_fifo) |
2450 | p_fifo--; |
2451 | |
2452 | /* Distribute the fifo equally amongst the queues */ |
2453 | for (i = 0; i < queue_count; i++) |
2454 | fifo[i] = p_fifo; |
2455 | } |
2456 | |
2457 | static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size, |
2458 | unsigned int queue_count, |
2459 | unsigned int *fifo) |
2460 | { |
2461 | unsigned int i; |
2462 | |
2463 | BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC); |
2464 | |
2465 | if (queue_count <= IEEE_8021QAZ_MAX_TCS) |
2466 | return fifo_size; |
2467 | |
2468 | /* Rx queues 9 and up are for specialized packets, |
2469 | * such as PTP or DCB control packets, etc. and |
2470 | * don't require a large fifo |
2471 | */ |
2472 | for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { |
2473 | fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; |
2474 | fifo_size -= XGMAC_FIFO_MIN_ALLOC; |
2475 | } |
2476 | |
2477 | return fifo_size; |
2478 | } |
2479 | |
2480 | static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata) |
2481 | { |
2482 | unsigned int delay; |
2483 | |
2484 | /* If a delay has been provided, use that */ |
2485 | if (pdata->pfc->delay) |
2486 | return pdata->pfc->delay / 8; |
2487 | |
2488 | /* Allow for two maximum size frames */ |
2489 | delay = xgbe_get_max_frame(pdata); |
2490 | delay += XGMAC_ETH_PREAMBLE; |
2491 | delay *= 2; |
2492 | |
2493 | /* Allow for PFC frame */ |
2494 | delay += XGMAC_PFC_DATA_LEN; |
2495 | delay += ETH_HLEN + ETH_FCS_LEN; |
2496 | delay += XGMAC_ETH_PREAMBLE; |
2497 | |
2498 | /* Allow for miscellaneous delays (LPI exit, cable, etc.) */ |
2499 | delay += XGMAC_PFC_DELAYS; |
2500 | |
2501 | return delay; |
2502 | } |
2503 | |
2504 | static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata) |
2505 | { |
2506 | unsigned int count, prio_queues; |
2507 | unsigned int i; |
2508 | |
2509 | if (!pdata->pfc->pfc_en) |
2510 | return 0; |
2511 | |
2512 | count = 0; |
2513 | prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); |
2514 | for (i = 0; i < prio_queues; i++) { |
		if (!xgbe_is_pfc_queue(pdata, i))
2516 | continue; |
2517 | |
2518 | pdata->pfcq[i] = 1; |
2519 | count++; |
2520 | } |
2521 | |
2522 | return count; |
2523 | } |
2524 | |
2525 | static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata, |
2526 | unsigned int fifo_size, |
2527 | unsigned int *fifo) |
2528 | { |
2529 | unsigned int q_fifo_size, rem_fifo, addn_fifo; |
2530 | unsigned int prio_queues; |
2531 | unsigned int pfc_count; |
2532 | unsigned int i; |
2533 | |
2534 | q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata)); |
2535 | prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); |
2536 | pfc_count = xgbe_get_pfc_queues(pdata); |
2537 | |
2538 | if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) { |
2539 | /* No traffic classes with PFC enabled or can't do lossless */ |
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2541 | return; |
2542 | } |
2543 | |
2544 | /* Calculate how much fifo we have to play with */ |
2545 | rem_fifo = fifo_size - (q_fifo_size * prio_queues); |
2546 | |
2547 | /* Calculate how much more than base fifo PFC needs, which also |
2548 | * becomes the threshold activation point (RFA) |
2549 | */ |
2550 | pdata->pfc_rfa = xgbe_get_pfc_delay(pdata); |
2551 | pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa); |
2552 | |
2553 | if (pdata->pfc_rfa > q_fifo_size) { |
2554 | addn_fifo = pdata->pfc_rfa - q_fifo_size; |
2555 | addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo); |
2556 | } else { |
2557 | addn_fifo = 0; |
2558 | } |
2559 | |
2560 | /* Calculate DCB fifo settings: |
2561 | * - distribute remaining fifo between the VLAN priority |
2562 | * queues based on traffic class PFC enablement and overall |
2563 | * priority (0 is lowest priority, so start at highest) |
2564 | */ |
2565 | i = prio_queues; |
2566 | while (i > 0) { |
2567 | i--; |
2568 | |
2569 | fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1; |
2570 | |
2571 | if (!pdata->pfcq[i] || !addn_fifo) |
2572 | continue; |
2573 | |
2574 | if (addn_fifo > rem_fifo) { |
			netdev_warn(pdata->netdev,
				    "RXq%u cannot set needed fifo size\n", i);
2577 | if (!rem_fifo) |
2578 | continue; |
2579 | |
2580 | addn_fifo = rem_fifo; |
2581 | } |
2582 | |
2583 | fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT); |
2584 | rem_fifo -= addn_fifo; |
2585 | } |
2586 | |
2587 | if (rem_fifo) { |
2588 | unsigned int inc_fifo = rem_fifo / prio_queues; |
2589 | |
2590 | /* Distribute remaining fifo across queues */ |
2591 | for (i = 0; i < prio_queues; i++) |
2592 | fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT); |
2593 | } |
2594 | } |
2595 | |
2596 | static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) |
2597 | { |
2598 | unsigned int fifo_size; |
2599 | unsigned int fifo[XGBE_MAX_QUEUES]; |
2600 | unsigned int i; |
2601 | |
2602 | fifo_size = xgbe_get_tx_fifo_size(pdata); |
2603 | |
	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
2605 | |
2606 | for (i = 0; i < pdata->tx_q_count; i++) |
2607 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); |
2608 | |
2609 | netif_info(pdata, drv, pdata->netdev, |
		   "%d Tx hardware queues, %d byte fifo per queue\n",
2611 | pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); |
2612 | } |
2613 | |
2614 | static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) |
2615 | { |
2616 | unsigned int fifo_size; |
2617 | unsigned int fifo[XGBE_MAX_QUEUES]; |
2618 | unsigned int prio_queues; |
2619 | unsigned int i; |
2620 | |
2621 | /* Clear any DCB related fifo/queue information */ |
2622 | memset(pdata->pfcq, 0, sizeof(pdata->pfcq)); |
2623 | pdata->pfc_rfa = 0; |
2624 | |
2625 | fifo_size = xgbe_get_rx_fifo_size(pdata); |
2626 | prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); |
2627 | |
2628 | /* Assign a minimum fifo to the non-VLAN priority queues */ |
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
2630 | |
2631 | if (pdata->pfc && pdata->ets) |
2632 | xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo); |
2633 | else |
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2635 | |
2636 | for (i = 0; i < pdata->rx_q_count; i++) |
2637 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); |
2638 | |
2639 | xgbe_calculate_flow_control_threshold(pdata, fifo); |
2640 | xgbe_config_flow_control_threshold(pdata); |
2641 | |
2642 | if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) { |
2643 | netif_info(pdata, drv, pdata->netdev, |
			   "%u Rx hardware queues\n", pdata->rx_q_count);
2645 | for (i = 0; i < pdata->rx_q_count; i++) |
2646 | netif_info(pdata, drv, pdata->netdev, |
				   "RxQ%u, %u byte fifo queue\n", i,
2648 | ((fifo[i] + 1) * XGMAC_FIFO_UNIT)); |
2649 | } else { |
2650 | netif_info(pdata, drv, pdata->netdev, |
			   "%u Rx hardware queues, %u byte fifo per queue\n",
2652 | pdata->rx_q_count, |
2653 | ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); |
2654 | } |
2655 | } |
2656 | |
2657 | static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) |
2658 | { |
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
2662 | unsigned int mask; |
2663 | unsigned int i, j, reg, reg_val; |
2664 | |
2665 | /* Map the MTL Tx Queues to Traffic Classes |
2666 | * Note: Tx Queues >= Traffic Classes |
2667 | */ |
2668 | qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; |
2669 | qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; |
2670 | |
2671 | for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { |
2672 | for (j = 0; j < qptc; j++) { |
2673 | netif_dbg(pdata, drv, pdata->netdev, |
				  "TXq%u mapped to TC%u\n", queue, i);
2675 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, |
2676 | Q2TCMAP, i); |
2677 | pdata->q2tc_map[queue++] = i; |
2678 | } |
2679 | |
2680 | if (i < qptc_extra) { |
2681 | netif_dbg(pdata, drv, pdata->netdev, |
				  "TXq%u mapped to TC%u\n", queue, i);
2683 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, |
2684 | Q2TCMAP, i); |
2685 | pdata->q2tc_map[queue++] = i; |
2686 | } |
2687 | } |
2688 | |
2689 | /* Map the 8 VLAN priority values to available MTL Rx queues */ |
2690 | prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); |
2691 | ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; |
2692 | ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; |
2693 | |
2694 | reg = MAC_RQC2R; |
2695 | reg_val = 0; |
2696 | for (i = 0, prio = 0; i < prio_queues;) { |
2697 | mask = 0; |
2698 | for (j = 0; j < ppq; j++) { |
2699 | netif_dbg(pdata, drv, pdata->netdev, |
				  "PRIO%u mapped to RXq%u\n", prio, i);
2701 | mask |= (1 << prio); |
2702 | pdata->prio2q_map[prio++] = i; |
2703 | } |
2704 | |
2705 | if (i < ppq_extra) { |
2706 | netif_dbg(pdata, drv, pdata->netdev, |
				  "PRIO%u mapped to RXq%u\n", prio, i);
2708 | mask |= (1 << prio); |
2709 | pdata->prio2q_map[prio++] = i; |
2710 | } |
2711 | |
2712 | reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); |
2713 | |
2714 | if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) |
2715 | continue; |
2716 | |
2717 | XGMAC_IOWRITE(pdata, reg, reg_val); |
2718 | reg += MAC_RQC2_INC; |
2719 | reg_val = 0; |
2720 | } |
2721 | |
2722 | /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ |
2723 | reg = MTL_RQDCM0R; |
2724 | reg_val = 0; |
2725 | for (i = 0; i < pdata->rx_q_count;) { |
2726 | reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); |
2727 | |
2728 | if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) |
2729 | continue; |
2730 | |
2731 | XGMAC_IOWRITE(pdata, reg, reg_val); |
2732 | |
2733 | reg += MTL_RQDCM_INC; |
2734 | reg_val = 0; |
2735 | } |
2736 | } |
2737 | |
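/* Propagate the queue to traffic class mapping to the network stack so
 * that skb priorities select the correct Tx queue.  Queues were assigned
 * to classes contiguously above, so each class is described by the offset
 * and count recovered from q2tc_map.
 */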
2738 | static void xgbe_config_tc(struct xgbe_prv_data *pdata) |
2739 | { |
2740 | unsigned int offset, queue, prio; |
2741 | u8 i; |
2742 | |
	netdev_reset_tc(pdata->netdev);
2744 | if (!pdata->num_tcs) |
2745 | return; |
2746 | |
	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
2748 | |
2749 | for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { |
2750 | while ((queue < pdata->tx_q_count) && |
2751 | (pdata->q2tc_map[queue] == i)) |
2752 | queue++; |
2753 | |
		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
			  i, offset, queue - 1);
		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
2757 | offset = queue; |
2758 | } |
2759 | |
2760 | if (!pdata->ets) |
2761 | return; |
2762 | |
2763 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) |
		netdev_set_prio_tc_map(pdata->netdev, prio,
				       pdata->ets->prio_tc[prio]);
2766 | } |
2767 | |
2768 | static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) |
2769 | { |
2770 | struct ieee_ets *ets = pdata->ets; |
2771 | unsigned int total_weight, min_weight, weight; |
2772 | unsigned int mask, reg, reg_val; |
2773 | unsigned int i, prio; |
2774 | |
2775 | if (!ets) |
2776 | return; |
2777 | |
2778 | /* Set Tx to deficit weighted round robin scheduling algorithm (when |
2779 | * traffic class is using ETS algorithm) |
2780 | */ |
2781 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); |
2782 | |
2783 | /* Set Traffic Class algorithms */ |
2784 | total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; |
2785 | min_weight = total_weight / 100; |
2786 | if (!min_weight) |
2787 | min_weight = 1; |
2788 | |
2789 | for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { |
2790 | /* Map the priorities to the traffic class */ |
2791 | mask = 0; |
2792 | for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { |
2793 | if (ets->prio_tc[prio] == i) |
2794 | mask |= (1 << prio); |
2795 | } |
2796 | mask &= 0xff; |
2797 | |
		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
2799 | i, mask); |
2800 | reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); |
2801 | reg_val = XGMAC_IOREAD(pdata, reg); |
2802 | |
2803 | reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); |
2804 | reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); |
2805 | |
2806 | XGMAC_IOWRITE(pdata, reg, reg_val); |
2807 | |
2808 | /* Set the traffic class algorithm */ |
2809 | switch (ets->tc_tsa[i]) { |
2810 | case IEEE_8021QAZ_TSA_STRICT: |
2811 | netif_dbg(pdata, drv, pdata->netdev, |
				  "TC%u using SP\n", i);
2813 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, |
2814 | MTL_TSA_SP); |
2815 | break; |
2816 | case IEEE_8021QAZ_TSA_ETS: |
2817 | weight = total_weight * ets->tc_tx_bw[i] / 100; |
2818 | weight = clamp(weight, min_weight, total_weight); |
2819 | |
2820 | netif_dbg(pdata, drv, pdata->netdev, |
				  "TC%u using DWRR (weight %u)\n", i, weight);
2822 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, |
2823 | MTL_TSA_ETS); |
2824 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, |
2825 | weight); |
2826 | break; |
2827 | } |
2828 | } |
2829 | |
2830 | xgbe_config_tc(pdata); |
2831 | } |
2832 | |
2833 | static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) |
2834 | { |
2835 | if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { |
2836 | /* Just stop the Tx queues while Rx fifo is changed */ |
		netif_tx_stop_all_queues(pdata->netdev);
2838 | |
2839 | /* Suspend Rx so that fifo's can be adjusted */ |
2840 | pdata->hw_if.disable_rx(pdata); |
2841 | } |
2842 | |
2843 | xgbe_config_rx_fifo_size(pdata); |
2844 | xgbe_config_flow_control(pdata); |
2845 | |
2846 | if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { |
2847 | /* Resume Rx */ |
2848 | pdata->hw_if.enable_rx(pdata); |
2849 | |
2850 | /* Resume Tx queues */ |
		netif_tx_start_all_queues(pdata->netdev);
2852 | } |
2853 | } |
2854 | |
2855 | static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) |
2856 | { |
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2858 | |
2859 | /* Filtering is done using perfect filtering and hash filtering */ |
2860 | if (pdata->hw_feat.hash_table_size) { |
2861 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); |
2862 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); |
2863 | XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); |
2864 | } |
2865 | } |
2866 | |
2867 | static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) |
2868 | { |
2869 | unsigned int val; |
2870 | |
2871 | val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; |
2872 | |
2873 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); |
2874 | } |
2875 | |
2876 | static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) |
2877 | { |
	xgbe_set_speed(pdata, pdata->phy_speed);
2879 | } |
2880 | |
2881 | static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) |
2882 | { |
2883 | if (pdata->netdev->features & NETIF_F_RXCSUM) |
2884 | xgbe_enable_rx_csum(pdata); |
2885 | else |
2886 | xgbe_disable_rx_csum(pdata); |
2887 | } |
2888 | |
2889 | static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) |
2890 | { |
2891 | /* Indicate that VLAN Tx CTAGs come from context descriptors */ |
2892 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); |
2893 | XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); |
2894 | |
2895 | /* Set the current VLAN Hash Table register value */ |
2896 | xgbe_update_vlan_hash_table(pdata); |
2897 | |
2898 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
2899 | xgbe_enable_rx_vlan_filtering(pdata); |
2900 | else |
2901 | xgbe_disable_rx_vlan_filtering(pdata); |
2902 | |
2903 | if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) |
2904 | xgbe_enable_rx_vlan_stripping(pdata); |
2905 | else |
2906 | xgbe_disable_rx_vlan_stripping(pdata); |
2907 | } |
2908 | |
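/* Read an MMC counter.  Whether the high word at reg_lo + 4 must be folded
 * in depends on the hardware variant: on mmc_64bit parts everything except
 * a handful of always-32-bit error counters is 64 bits wide, while on
 * older parts only the octet counters span two registers.
 */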
2909 | static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) |
2910 | { |
2911 | bool read_hi; |
2912 | u64 val; |
2913 | |
2914 | if (pdata->vdata->mmc_64bit) { |
2915 | switch (reg_lo) { |
2916 | /* These registers are always 32 bit */ |
2917 | case MMC_RXRUNTERROR: |
2918 | case MMC_RXJABBERERROR: |
2919 | case MMC_RXUNDERSIZE_G: |
2920 | case MMC_RXOVERSIZE_G: |
2921 | case MMC_RXWATCHDOGERROR: |
2922 | read_hi = false; |
2923 | break; |
2924 | |
2925 | default: |
2926 | read_hi = true; |
2927 | } |
2928 | } else { |
2929 | switch (reg_lo) { |
2930 | /* These registers are always 64 bit */ |
2931 | case MMC_TXOCTETCOUNT_GB_LO: |
2932 | case MMC_TXOCTETCOUNT_G_LO: |
2933 | case MMC_RXOCTETCOUNT_GB_LO: |
2934 | case MMC_RXOCTETCOUNT_G_LO: |
2935 | read_hi = true; |
2936 | break; |
2937 | |
2938 | default: |
2939 | read_hi = false; |
2940 | } |
2941 | } |
2942 | |
2943 | val = XGMAC_IOREAD(pdata, reg_lo); |
2944 | |
2945 | if (read_hi) |
2946 | val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); |
2947 | |
2948 | return val; |
2949 | } |
2950 | |
2951 | static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) |
2952 | { |
2953 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; |
2954 | unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); |
2955 | |
2956 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) |
2957 | stats->txoctetcount_gb += |
2958 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
2959 | |
2960 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) |
2961 | stats->txframecount_gb += |
2962 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
2963 | |
2964 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) |
2965 | stats->txbroadcastframes_g += |
2966 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
2967 | |
2968 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) |
2969 | stats->txmulticastframes_g += |
2970 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
2971 | |
2972 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) |
2973 | stats->tx64octets_gb += |
2974 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
2975 | |
2976 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) |
2977 | stats->tx65to127octets_gb += |
2978 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
2979 | |
2980 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) |
2981 | stats->tx128to255octets_gb += |
2982 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
2983 | |
2984 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) |
2985 | stats->tx256to511octets_gb += |
2986 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
2987 | |
2988 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) |
2989 | stats->tx512to1023octets_gb += |
2990 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
2991 | |
2992 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) |
2993 | stats->tx1024tomaxoctets_gb += |
2994 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
2995 | |
2996 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) |
2997 | stats->txunicastframes_gb += |
2998 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
2999 | |
3000 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) |
3001 | stats->txmulticastframes_gb += |
3002 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
3003 | |
3004 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) |
3005 | stats->txbroadcastframes_g += |
3006 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
3007 | |
3008 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) |
3009 | stats->txunderflowerror += |
3010 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
3011 | |
3012 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) |
3013 | stats->txoctetcount_g += |
3014 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
3015 | |
3016 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) |
3017 | stats->txframecount_g += |
3018 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
3019 | |
3020 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) |
3021 | stats->txpauseframes += |
3022 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
3023 | |
3024 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) |
3025 | stats->txvlanframes_g += |
3026 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
3027 | } |
3028 | |
3029 | static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) |
3030 | { |
3031 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; |
3032 | unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); |
3033 | |
3034 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) |
3035 | stats->rxframecount_gb += |
3036 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
3037 | |
3038 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) |
3039 | stats->rxoctetcount_gb += |
3040 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
3041 | |
3042 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) |
3043 | stats->rxoctetcount_g += |
3044 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
3045 | |
3046 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) |
3047 | stats->rxbroadcastframes_g += |
3048 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
3049 | |
3050 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) |
3051 | stats->rxmulticastframes_g += |
3052 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
3053 | |
3054 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) |
3055 | stats->rxcrcerror += |
3056 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
3057 | |
3058 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) |
3059 | stats->rxrunterror += |
3060 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
3061 | |
3062 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) |
3063 | stats->rxjabbererror += |
3064 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
3065 | |
3066 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) |
3067 | stats->rxundersize_g += |
3068 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
3069 | |
3070 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) |
3071 | stats->rxoversize_g += |
3072 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
3073 | |
3074 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) |
3075 | stats->rx64octets_gb += |
3076 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
3077 | |
3078 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) |
3079 | stats->rx65to127octets_gb += |
3080 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
3081 | |
3082 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) |
3083 | stats->rx128to255octets_gb += |
3084 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
3085 | |
3086 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) |
3087 | stats->rx256to511octets_gb += |
3088 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
3089 | |
3090 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) |
3091 | stats->rx512to1023octets_gb += |
3092 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
3093 | |
3094 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) |
3095 | stats->rx1024tomaxoctets_gb += |
3096 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
3097 | |
3098 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) |
3099 | stats->rxunicastframes_g += |
3100 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
3101 | |
3102 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) |
3103 | stats->rxlengtherror += |
3104 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
3105 | |
3106 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) |
3107 | stats->rxoutofrangetype += |
3108 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
3109 | |
3110 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) |
3111 | stats->rxpauseframes += |
3112 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
3113 | |
3114 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) |
3115 | stats->rxfifooverflow += |
3116 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
3117 | |
3118 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) |
3119 | stats->rxvlanframes_gb += |
3120 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
3121 | |
3122 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) |
3123 | stats->rxwatchdogerror += |
3124 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
3125 | } |
3126 | |
3127 | static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) |
3128 | { |
3129 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; |
3130 | |
3131 | /* Freeze counters */ |
3132 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); |
3133 | |
3134 | stats->txoctetcount_gb += |
3135 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
3136 | |
3137 | stats->txframecount_gb += |
3138 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
3139 | |
3140 | stats->txbroadcastframes_g += |
3141 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
3142 | |
3143 | stats->txmulticastframes_g += |
3144 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
3145 | |
3146 | stats->tx64octets_gb += |
3147 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
3148 | |
3149 | stats->tx65to127octets_gb += |
3150 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
3151 | |
3152 | stats->tx128to255octets_gb += |
3153 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
3154 | |
3155 | stats->tx256to511octets_gb += |
3156 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
3157 | |
3158 | stats->tx512to1023octets_gb += |
3159 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
3160 | |
3161 | stats->tx1024tomaxoctets_gb += |
3162 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
3163 | |
3164 | stats->txunicastframes_gb += |
3165 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
3166 | |
3167 | stats->txmulticastframes_gb += |
3168 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
3169 | |
3170 | stats->txbroadcastframes_g += |
3171 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
3172 | |
3173 | stats->txunderflowerror += |
3174 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
3175 | |
3176 | stats->txoctetcount_g += |
3177 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
3178 | |
3179 | stats->txframecount_g += |
3180 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
3181 | |
3182 | stats->txpauseframes += |
3183 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
3184 | |
3185 | stats->txvlanframes_g += |
3186 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
3187 | |
3188 | stats->rxframecount_gb += |
3189 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
3190 | |
3191 | stats->rxoctetcount_gb += |
3192 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
3193 | |
3194 | stats->rxoctetcount_g += |
3195 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
3196 | |
3197 | stats->rxbroadcastframes_g += |
3198 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
3199 | |
3200 | stats->rxmulticastframes_g += |
3201 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
3202 | |
3203 | stats->rxcrcerror += |
3204 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
3205 | |
3206 | stats->rxrunterror += |
3207 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
3208 | |
3209 | stats->rxjabbererror += |
3210 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
3211 | |
3212 | stats->rxundersize_g += |
3213 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
3214 | |
3215 | stats->rxoversize_g += |
3216 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
3217 | |
3218 | stats->rx64octets_gb += |
3219 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
3220 | |
3221 | stats->rx65to127octets_gb += |
3222 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
3223 | |
3224 | stats->rx128to255octets_gb += |
3225 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
3226 | |
3227 | stats->rx256to511octets_gb += |
3228 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
3229 | |
3230 | stats->rx512to1023octets_gb += |
3231 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
3232 | |
3233 | stats->rx1024tomaxoctets_gb += |
3234 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
3235 | |
3236 | stats->rxunicastframes_g += |
3237 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
3238 | |
3239 | stats->rxlengtherror += |
3240 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
3241 | |
3242 | stats->rxoutofrangetype += |
3243 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
3244 | |
3245 | stats->rxpauseframes += |
3246 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
3247 | |
3248 | stats->rxfifooverflow += |
3249 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
3250 | |
3251 | stats->rxvlanframes_gb += |
3252 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
3253 | |
3254 | stats->rxwatchdogerror += |
3255 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
3256 | |
3257 | /* Un-freeze counters */ |
3258 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); |
3259 | } |
3260 | |
3261 | static void xgbe_config_mmc(struct xgbe_prv_data *pdata) |
3262 | { |
3263 | /* Set counters to reset on read */ |
3264 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); |
3265 | |
3266 | /* Reset the counters */ |
3267 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); |
3268 | } |
3269 | |
3270 | static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, |
3271 | unsigned int queue) |
3272 | { |
3273 | unsigned int tx_status; |
3274 | unsigned long tx_timeout; |
3275 | |
3276 | /* The Tx engine cannot be stopped if it is actively processing |
3277 | * packets. Wait for the Tx queue to empty the Tx fifo. Don't |
3278 | * wait forever though... |
3279 | */ |
3280 | tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); |
3281 | while (time_before(jiffies, tx_timeout)) { |
3282 | tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); |
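		/* The queue has drained once the read/transfer controller
		 * has left the Read state (TRCSTS != 1) and the
		 * queue-not-empty status (TXQSTS) is clear.
		 */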
3283 | if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && |
3284 | (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) |
3285 | break; |
3286 | |
		usleep_range(500, 1000);
3288 | } |
3289 | |
3290 | if (!time_before(jiffies, tx_timeout)) |
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
3294 | } |
3295 | |
3296 | static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, |
3297 | unsigned int queue) |
3298 | { |
3299 | unsigned int tx_dsr, tx_pos, tx_qidx; |
3300 | unsigned int tx_status; |
3301 | unsigned long tx_timeout; |
3302 | |
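	/* MAC IP versions newer than 2.00 report per-queue Tx status
	 * through the MTL queue registers; use that interface instead
	 * of the DMA debug status registers below.
	 */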
3303 | if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) |
3304 | return xgbe_txq_prepare_tx_stop(pdata, queue); |
3305 | |
3306 | /* Calculate the status register to read and the position within */ |
3307 | if (queue < DMA_DSRX_FIRST_QUEUE) { |
3308 | tx_dsr = DMA_DSR0; |
3309 | tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; |
3310 | } else { |
3311 | tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; |
3312 | |
3313 | tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); |
3314 | tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + |
3315 | DMA_DSRX_TPS_START; |
3316 | } |
3317 | |
3318 | /* The Tx engine cannot be stopped if it is actively processing |
3319 | * descriptors. Wait for the Tx engine to enter the stopped or |
3320 | * suspended state. Don't wait forever though... |
3321 | */ |
3322 | tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); |
3323 | while (time_before(jiffies, tx_timeout)) { |
3324 | tx_status = XGMAC_IOREAD(pdata, tx_dsr); |
3325 | tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); |
3326 | if ((tx_status == DMA_TPS_STOPPED) || |
3327 | (tx_status == DMA_TPS_SUSPENDED)) |
3328 | break; |
3329 | |
		usleep_range(500, 1000);
3331 | } |
3332 | |
3333 | if (!time_before(jiffies, tx_timeout)) |
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
3337 | } |
3338 | |
3339 | static void xgbe_enable_tx(struct xgbe_prv_data *pdata) |
3340 | { |
3341 | unsigned int i; |
3342 | |
3343 | /* Enable each Tx DMA channel */ |
3344 | for (i = 0; i < pdata->channel_count; i++) { |
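		/* Tx rings are assigned to the lowest-numbered channels,
		 * so the first channel without one ends the scan.
		 */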
3345 | if (!pdata->channel[i]->tx_ring) |
3346 | break; |
3347 | |
3348 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); |
3349 | } |
3350 | |
3351 | /* Enable each Tx queue */ |
3352 | for (i = 0; i < pdata->tx_q_count; i++) |
3353 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, |
3354 | MTL_Q_ENABLED); |
3355 | |
3356 | /* Enable MAC Tx */ |
3357 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); |
3358 | } |
3359 | |
3360 | static void xgbe_disable_tx(struct xgbe_prv_data *pdata) |
3361 | { |
3362 | unsigned int i; |
3363 | |
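	/* Quiesce in the reverse of the enable order: drain each queue,
	 * stop the MAC transmitter, disable the MTL queues, and finally
	 * stop the DMA channels.
	 */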
3364 | /* Prepare for Tx DMA channel stop */ |
3365 | for (i = 0; i < pdata->tx_q_count; i++) |
		xgbe_prepare_tx_stop(pdata, i);
3367 | |
3368 | /* Disable MAC Tx */ |
3369 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); |
3370 | |
3371 | /* Disable each Tx queue */ |
3372 | for (i = 0; i < pdata->tx_q_count; i++) |
3373 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); |
3374 | |
3375 | /* Disable each Tx DMA channel */ |
3376 | for (i = 0; i < pdata->channel_count; i++) { |
3377 | if (!pdata->channel[i]->tx_ring) |
3378 | break; |
3379 | |
3380 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); |
3381 | } |
3382 | } |
3383 | |
3384 | static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, |
3385 | unsigned int queue) |
3386 | { |
3387 | unsigned int rx_status; |
3388 | unsigned long rx_timeout; |
3389 | |
3390 | /* The Rx engine cannot be stopped if it is actively processing |
3391 | * packets. Wait for the Rx queue to empty the Rx fifo. Don't |
3392 | * wait forever though... |
3393 | */ |
3394 | rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); |
3395 | while (time_before(jiffies, rx_timeout)) { |
3396 | rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); |
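		/* The queue has drained once no packets remain in it
		 * (PRXQ == 0) and the Rx queue fill-level status
		 * (RXQSTS) reports empty.
		 */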
3397 | if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && |
3398 | (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) |
3399 | break; |
3400 | |
		usleep_range(500, 1000);
3402 | } |
3403 | |
3404 | if (!time_before(jiffies, rx_timeout)) |
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
3408 | } |
3409 | |
3410 | static void xgbe_enable_rx(struct xgbe_prv_data *pdata) |
3411 | { |
3412 | unsigned int reg_val, i; |
3413 | |
3414 | /* Enable each Rx DMA channel */ |
3415 | for (i = 0; i < pdata->channel_count; i++) { |
3416 | if (!pdata->channel[i]->rx_ring) |
3417 | break; |
3418 | |
3419 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); |
3420 | } |
3421 | |
3422 | /* Enable each Rx queue */ |
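	/* MAC_RQC0R holds a 2-bit enable field per queue; 0x2 marks a
	 * queue as enabled for DCB/generic traffic, so e.g. two active
	 * queues yield a register value of 0x0a.
	 */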
3423 | reg_val = 0; |
3424 | for (i = 0; i < pdata->rx_q_count; i++) |
3425 | reg_val |= (0x02 << (i << 1)); |
3426 | XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); |
3427 | |
3428 | /* Enable MAC Rx */ |
3429 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); |
3430 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); |
3431 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); |
3432 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); |
3433 | } |
3434 | |
3435 | static void xgbe_disable_rx(struct xgbe_prv_data *pdata) |
3436 | { |
3437 | unsigned int i; |
3438 | |
3439 | /* Disable MAC Rx */ |
3440 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); |
3441 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); |
3442 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); |
3443 | XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); |
3444 | |
3445 | /* Prepare for Rx DMA channel stop */ |
3446 | for (i = 0; i < pdata->rx_q_count; i++) |
		xgbe_prepare_rx_stop(pdata, i);
3448 | |
3449 | /* Disable each Rx queue */ |
3450 | XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); |
3451 | |
3452 | /* Disable each Rx DMA channel */ |
3453 | for (i = 0; i < pdata->channel_count; i++) { |
3454 | if (!pdata->channel[i]->rx_ring) |
3455 | break; |
3456 | |
3457 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); |
3458 | } |
3459 | } |
3460 | |
3461 | static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) |
3462 | { |
3463 | unsigned int i; |
3464 | |
3465 | /* Enable each Tx DMA channel */ |
3466 | for (i = 0; i < pdata->channel_count; i++) { |
3467 | if (!pdata->channel[i]->tx_ring) |
3468 | break; |
3469 | |
3470 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); |
3471 | } |
3472 | |
3473 | /* Enable MAC Tx */ |
3474 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); |
3475 | } |
3476 | |
3477 | static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) |
3478 | { |
3479 | unsigned int i; |
3480 | |
3481 | /* Prepare for Tx DMA channel stop */ |
3482 | for (i = 0; i < pdata->tx_q_count; i++) |
		xgbe_prepare_tx_stop(pdata, i);
3484 | |
3485 | /* Disable MAC Tx */ |
3486 | XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); |
3487 | |
3488 | /* Disable each Tx DMA channel */ |
3489 | for (i = 0; i < pdata->channel_count; i++) { |
3490 | if (!pdata->channel[i]->tx_ring) |
3491 | break; |
3492 | |
3493 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); |
3494 | } |
3495 | } |
3496 | |
3497 | static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) |
3498 | { |
3499 | unsigned int i; |
3500 | |
3501 | /* Enable each Rx DMA channel */ |
3502 | for (i = 0; i < pdata->channel_count; i++) { |
3503 | if (!pdata->channel[i]->rx_ring) |
3504 | break; |
3505 | |
3506 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); |
3507 | } |
3508 | } |
3509 | |
3510 | static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) |
3511 | { |
3512 | unsigned int i; |
3513 | |
3514 | /* Disable each Rx DMA channel */ |
3515 | for (i = 0; i < pdata->channel_count; i++) { |
3516 | if (!pdata->channel[i]->rx_ring) |
3517 | break; |
3518 | |
3519 | XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); |
3520 | } |
3521 | } |
3522 | |
3523 | static int xgbe_init(struct xgbe_prv_data *pdata) |
3524 | { |
3525 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
3526 | int ret; |
3527 | |
3528 | DBGPR("-->xgbe_init\n" ); |
3529 | |
3530 | /* Flush Tx queues */ |
3531 | ret = xgbe_flush_tx_queues(pdata); |
3532 | if (ret) { |
		netdev_err(pdata->netdev, "error flushing TX queues\n");
3534 | return ret; |
3535 | } |
3536 | |
3537 | /* |
3538 | * Initialize DMA related features |
3539 | */ |
3540 | xgbe_config_dma_bus(pdata); |
3541 | xgbe_config_dma_cache(pdata); |
3542 | xgbe_config_osp_mode(pdata); |
3543 | xgbe_config_pbl_val(pdata); |
3544 | xgbe_config_rx_coalesce(pdata); |
3545 | xgbe_config_tx_coalesce(pdata); |
3546 | xgbe_config_rx_buffer_size(pdata); |
3547 | xgbe_config_tso_mode(pdata); |
3548 | xgbe_config_sph_mode(pdata); |
3549 | xgbe_config_rss(pdata); |
3550 | desc_if->wrapper_tx_desc_init(pdata); |
3551 | desc_if->wrapper_rx_desc_init(pdata); |
3552 | xgbe_enable_dma_interrupts(pdata); |
3553 | |
3554 | /* |
3555 | * Initialize MTL related features |
3556 | */ |
3557 | xgbe_config_mtl_mode(pdata); |
3558 | xgbe_config_queue_mapping(pdata); |
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
3563 | xgbe_config_tx_fifo_size(pdata); |
3564 | xgbe_config_rx_fifo_size(pdata); |
	/* TODO: Error packet and undersized good packet forwarding enable
	 *	 (FEP and FUP)
	 */
3568 | xgbe_config_dcb_tc(pdata); |
3569 | xgbe_enable_mtl_interrupts(pdata); |
3570 | |
3571 | /* |
3572 | * Initialize MAC related features |
3573 | */ |
3574 | xgbe_config_mac_address(pdata); |
3575 | xgbe_config_rx_mode(pdata); |
3576 | xgbe_config_jumbo_enable(pdata); |
3577 | xgbe_config_flow_control(pdata); |
3578 | xgbe_config_mac_speed(pdata); |
3579 | xgbe_config_checksum_offload(pdata); |
3580 | xgbe_config_vlan_support(pdata); |
3581 | xgbe_config_mmc(pdata); |
3582 | xgbe_enable_mac_interrupts(pdata); |
3583 | |
3584 | /* |
3585 | * Initialize ECC related features |
3586 | */ |
3587 | xgbe_enable_ecc_interrupts(pdata); |
3588 | |
3589 | DBGPR("<--xgbe_init\n" ); |
3590 | |
3591 | return 0; |
3592 | } |
3593 | |
3594 | void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) |
3595 | { |
3596 | DBGPR("-->xgbe_init_function_ptrs\n" ); |
3597 | |
3598 | hw_if->tx_complete = xgbe_tx_complete; |
3599 | |
3600 | hw_if->set_mac_address = xgbe_set_mac_address; |
3601 | hw_if->config_rx_mode = xgbe_config_rx_mode; |
3602 | |
3603 | hw_if->enable_rx_csum = xgbe_enable_rx_csum; |
3604 | hw_if->disable_rx_csum = xgbe_disable_rx_csum; |
3605 | |
3606 | hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; |
3607 | hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; |
3608 | hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; |
3609 | hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; |
3610 | hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; |
3611 | |
3612 | hw_if->read_mmd_regs = xgbe_read_mmd_regs; |
3613 | hw_if->write_mmd_regs = xgbe_write_mmd_regs; |
3614 | |
3615 | hw_if->set_speed = xgbe_set_speed; |
3616 | |
3617 | hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; |
3618 | hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22; |
3619 | hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22; |
3620 | hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45; |
3621 | hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45; |
3622 | |
3623 | hw_if->set_gpio = xgbe_set_gpio; |
3624 | hw_if->clr_gpio = xgbe_clr_gpio; |
3625 | |
3626 | hw_if->enable_tx = xgbe_enable_tx; |
3627 | hw_if->disable_tx = xgbe_disable_tx; |
3628 | hw_if->enable_rx = xgbe_enable_rx; |
3629 | hw_if->disable_rx = xgbe_disable_rx; |
3630 | |
3631 | hw_if->powerup_tx = xgbe_powerup_tx; |
3632 | hw_if->powerdown_tx = xgbe_powerdown_tx; |
3633 | hw_if->powerup_rx = xgbe_powerup_rx; |
3634 | hw_if->powerdown_rx = xgbe_powerdown_rx; |
3635 | |
3636 | hw_if->dev_xmit = xgbe_dev_xmit; |
3637 | hw_if->dev_read = xgbe_dev_read; |
3638 | hw_if->enable_int = xgbe_enable_int; |
3639 | hw_if->disable_int = xgbe_disable_int; |
3640 | hw_if->init = xgbe_init; |
3641 | hw_if->exit = xgbe_exit; |
3642 | |
	/* Descriptor related sequences have to be initialized here */
3644 | hw_if->tx_desc_init = xgbe_tx_desc_init; |
3645 | hw_if->rx_desc_init = xgbe_rx_desc_init; |
3646 | hw_if->tx_desc_reset = xgbe_tx_desc_reset; |
3647 | hw_if->rx_desc_reset = xgbe_rx_desc_reset; |
3648 | hw_if->is_last_desc = xgbe_is_last_desc; |
3649 | hw_if->is_context_desc = xgbe_is_context_desc; |
3650 | hw_if->tx_start_xmit = xgbe_tx_start_xmit; |
3651 | |
	/* For flow control */
3653 | hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; |
3654 | hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; |
3655 | |
	/* For RX and TX coalescing */
3657 | hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; |
3658 | hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; |
3659 | hw_if->usec_to_riwt = xgbe_usec_to_riwt; |
3660 | hw_if->riwt_to_usec = xgbe_riwt_to_usec; |
3661 | |
3662 | /* For RX and TX threshold config */ |
3663 | hw_if->config_rx_threshold = xgbe_config_rx_threshold; |
3664 | hw_if->config_tx_threshold = xgbe_config_tx_threshold; |
3665 | |
3666 | /* For RX and TX Store and Forward Mode config */ |
3667 | hw_if->config_rsf_mode = xgbe_config_rsf_mode; |
3668 | hw_if->config_tsf_mode = xgbe_config_tsf_mode; |
3669 | |
3670 | /* For TX DMA Operating on Second Frame config */ |
3671 | hw_if->config_osp_mode = xgbe_config_osp_mode; |
3672 | |
3673 | /* For MMC statistics support */ |
3674 | hw_if->tx_mmc_int = xgbe_tx_mmc_int; |
3675 | hw_if->rx_mmc_int = xgbe_rx_mmc_int; |
3676 | hw_if->read_mmc_stats = xgbe_read_mmc_stats; |
3677 | |
3678 | /* For PTP config */ |
3679 | hw_if->config_tstamp = xgbe_config_tstamp; |
3680 | hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; |
3681 | hw_if->set_tstamp_time = xgbe_set_tstamp_time; |
3682 | hw_if->get_tstamp_time = xgbe_get_tstamp_time; |
3683 | hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; |
3684 | |
3685 | /* For Data Center Bridging config */ |
3686 | hw_if->config_tc = xgbe_config_tc; |
3687 | hw_if->config_dcb_tc = xgbe_config_dcb_tc; |
3688 | hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; |
3689 | |
3690 | /* For Receive Side Scaling */ |
3691 | hw_if->enable_rss = xgbe_enable_rss; |
3692 | hw_if->disable_rss = xgbe_disable_rss; |
3693 | hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; |
3694 | hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; |
3695 | |
3696 | /* For ECC */ |
3697 | hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; |
3698 | hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; |
3699 | |
3700 | /* For VXLAN */ |
3701 | hw_if->enable_vxlan = xgbe_enable_vxlan; |
3702 | hw_if->disable_vxlan = xgbe_disable_vxlan; |
3703 | hw_if->set_vxlan_id = xgbe_set_vxlan_id; |
3704 | |
3705 | DBGPR("<--xgbe_init_function_ptrs\n" ); |
3706 | } |
3707 | |