/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

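/* Allocate memory on the requested NUMA node, falling back to any node
 * if the node-local allocation fails.
 */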
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}

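/* Allocate one channel structure per DMA ring pair, placing each channel
 * and its rings on the NUMA node of the CPU expected to service it.
 */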
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}

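/* Ring occupancy helpers: 'cur' is the next descriptor to be used and
 * 'dirty' is the next descriptor to be reclaimed, so (cur - dirty)
 * descriptors are currently in flight.
 */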
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

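/* Stop the Tx subqueue if the ring cannot hold 'count' more descriptors,
 * kicking the hardware first if a prior xmit_more batch is still pending.
 */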
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

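/* Windowed ECC error accounting: xgbe_ecc_sec() counts corrected
 * (single-bit) errors and xgbe_ecc_ded() counts detected (double-bit)
 * errors within the current time window, returning true when the
 * respective threshold has been exceeded.
 */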
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}

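/* ECC interrupt bottom half: tally corrected and detected errors per area
 * (Tx FIFO, Rx FIFO, descriptor cache), disable further ECC reporting or
 * stop the device when the configured thresholds are exceeded, and then
 * acknowledge the interrupt status.
 */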
static void xgbe_ecc_isr_task(struct tasklet_struct *t)
{
	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_ecc);
	else
		xgbe_ecc_isr_task(&pdata->tasklet_ecc);

	return IRQ_HANDLED;
}

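/* Device interrupt bottom half: dispatches DMA channel, MAC, MTL,
 * timestamp and MDIO events, and also services the AN, ECC and I2C
 * interrupts when they share the device IRQ line.
 */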
static void xgbe_isr_task(struct tasklet_struct *t)
{
	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_task(&pdata->tasklet_ecc);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_dev);
	else
		xgbe_isr_task(&pdata->tasklet_dev);

	return IRQ_HANDLED;
}

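/* Per channel DMA interrupt handler: switches the channel to NAPI polling
 * and acknowledges the Tx/Rx status bits for this channel.
 */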
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}

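/* Tx coalescing timer: if descriptors are still pending when the timer
 * fires, disable the Tx/Rx interrupts and schedule NAPI to clean the ring.
 */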
static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq) {
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		} else {
			xgbe_disable_rx_tx_ints(pdata);
		}

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		/* Deactivate the Tx timer */
		del_timer_sync(&channel->tx_timer);
		channel->tx_timer_active = 0;
	}
}

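/* Read the three hardware feature registers and translate the raw
 * encodings (FIFO sizes, DMA width, hash table size, zero-based counts)
 * into the actual values used by the rest of the driver.
 */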
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, " 1GbE support : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, " MDIO interface : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, " Magic packet support : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, " Management counters : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, " ARP offload : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, " TX checksum offload : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " RX checksum offload : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, " Timestamp source : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, " RX fifo size : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, " TX fifo size : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, " DMA width : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, " Split header : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, " Debug memory interface : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, " Traffic Class count : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, " Hash table size : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, " RX queue count : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, " TX queue count : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
			hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
			hw_feat->tx_ch_cnt);
		dev_dbg(pdata->dev, " PPS outputs : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}

static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
			       unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->vxlan_port = be16_to_cpu(ti->port);
	pdata->hw_if.enable_vxlan(pdata);

	return 0;
}

static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
				 unsigned int entry, struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	pdata->hw_if.disable_vxlan(pdata);
	pdata->vxlan_port = 0;

	return 0;
}

static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
	.set_port	= xgbe_vxlan_set_port,
	.unset_port	= xgbe_vxlan_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
{
	return &xgbe_udp_tunnels;
}

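/* Enable (and optionally register) the NAPI instances: one per channel
 * when per channel interrupts are in use, otherwise a single device-wide
 * instance.
 */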
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

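/* Request the device, ECC and per channel DMA interrupts, unwinding any
 * already-requested IRQs on failure.
 */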
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
	tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	tasklet_kill(&pdata->tasklet_dev);
	tasklet_kill(&pdata->tasklet_ecc);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

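/* Quiesce the device while keeping the rings and IRQs allocated; 'caller'
 * distinguishes the driver (XGMAC_DRIVER_CONTEXT) and ioctl
 * (XGMAC_IOCTL_CONTEXT) paths, and xgbe_powerup() reverses the sequence.
 */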
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);
}

static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	if (pdata->new_tx_ring_count) {
		pdata->tx_ring_count = pdata->new_tx_ring_count;
		pdata->tx_q_count = pdata->tx_ring_count;
		pdata->new_tx_ring_count = 0;
	}

	if (pdata->new_rx_ring_count) {
		pdata->rx_ring_count = pdata->new_rx_ring_count;
		pdata->new_rx_ring_count = 0;
	}

	/* Calculate the Rx buffer size before allocating rings */
	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		return ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the service and Tx timers */
	xgbe_init_timers(pdata);

	return 0;

err_channels:
	xgbe_free_memory(pdata);

	return ret;
}

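/* Bring the device up: program the queue counts and RSS lookup table,
 * initialize the hardware, enable NAPI and IRQs, start the PHY and then
 * open the Tx queues; errors unwind in reverse order.
 */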
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	/* Set the number of queues */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real rx queue count\n");
		return ret;
	}

	/* Set RSS lookup table data for programming */
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_nic_reset_ntf(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}

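/* Tear the device down in the reverse order of xgbe_start(), then reset
 * the byte queue limit state of every Tx queue.
 */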
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(pdata->netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_vxlan_unset_port(netdev, 0, 0, NULL);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_memory(pdata);
	xgbe_alloc_memory(pdata);

	xgbe_start(pdata);
}

void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

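/* Translate a SIOCSHWTSTAMP request into the MAC_TSCR register bits that
 * select which packets the hardware timestamps, then program the hardware
 * and remember the accepted configuration.
 */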
1507 | static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, |
1508 | struct ifreq *ifreq) |
1509 | { |
1510 | struct hwtstamp_config config; |
1511 | unsigned int mac_tscr; |
1512 | |
1513 | if (copy_from_user(to: &config, from: ifreq->ifr_data, n: sizeof(config))) |
1514 | return -EFAULT; |
1515 | |
1516 | mac_tscr = 0; |
1517 | |
1518 | switch (config.tx_type) { |
1519 | case HWTSTAMP_TX_OFF: |
1520 | break; |
1521 | |
1522 | case HWTSTAMP_TX_ON: |
1523 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1524 | break; |
1525 | |
1526 | default: |
1527 | return -ERANGE; |
1528 | } |
1529 | |
1530 | switch (config.rx_filter) { |
1531 | case HWTSTAMP_FILTER_NONE: |
1532 | break; |
1533 | |
1534 | case HWTSTAMP_FILTER_NTP_ALL: |
1535 | case HWTSTAMP_FILTER_ALL: |
1536 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); |
1537 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1538 | break; |
1539 | |
1540 | /* PTP v2, UDP, any kind of event packet */ |
1541 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
1542 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1543 | fallthrough; /* to PTP v1, UDP, any kind of event packet */ |
1544 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
1545 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1546 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1547 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); |
1548 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1549 | break; |
1550 | |
1551 | /* PTP v2, UDP, Sync packet */ |
1552 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
1553 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1554 | fallthrough; /* to PTP v1, UDP, Sync packet */ |
1555 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
1556 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1557 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1558 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1559 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1560 | break; |
1561 | |
1562 | /* PTP v2, UDP, Delay_req packet */ |
1563 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
1564 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1565 | fallthrough; /* to PTP v1, UDP, Delay_req packet */ |
1566 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
1567 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1568 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1569 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1570 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); |
1571 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1572 | break; |
1573 | |
1574 | /* 802.AS1, Ethernet, any kind of event packet */ |
1575 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
1576 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); |
1577 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); |
1578 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1579 | break; |
1580 | |
1581 | /* 802.AS1, Ethernet, Sync packet */ |
1582 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
1583 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); |
1584 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1585 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1586 | break; |
1587 | |
1588 | /* 802.AS1, Ethernet, Delay_req packet */ |
1589 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
1590 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); |
1591 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); |
1592 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1593 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1594 | break; |
1595 | |
1596 | /* PTP v2/802.AS1, any layer, any kind of event packet */ |
1597 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
1598 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1599 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); |
1600 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1601 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1602 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); |
1603 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1604 | break; |
1605 | |
	/* PTP v2/802.1AS, any layer, Sync packet */
1607 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
1608 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1609 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); |
1610 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1611 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1612 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1613 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1614 | break; |
1615 | |
	/* PTP v2/802.1AS, any layer, Delay_req packet */
1617 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
1618 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); |
1619 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); |
1620 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); |
1621 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); |
1622 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); |
1623 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); |
1624 | XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); |
1625 | break; |
1626 | |
1627 | default: |
1628 | return -ERANGE; |
1629 | } |
1630 | |
1631 | pdata->hw_if.config_tstamp(pdata, mac_tscr); |
1632 | |
1633 | memcpy(&pdata->tstamp_config, &config, sizeof(config)); |
1634 | |
1635 | return 0; |
1636 | } |
1637 | |
1638 | static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, |
1639 | struct sk_buff *skb, |
1640 | struct xgbe_packet_data *packet) |
1641 | { |
1642 | unsigned long flags; |
1643 | |
1644 | if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { |
1645 | spin_lock_irqsave(&pdata->tstamp_lock, flags); |
1646 | if (pdata->tx_tstamp_skb) { |
1647 | /* Another timestamp in progress, ignore this one */ |
1648 | XGMAC_SET_BITS(packet->attributes, |
1649 | TX_PACKET_ATTRIBUTES, PTP, 0); |
1650 | } else { |
1651 | pdata->tx_tstamp_skb = skb_get(skb); |
1652 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1653 | } |
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1655 | } |
1656 | |
1657 | skb_tx_timestamp(skb); |
1658 | } |
1659 | |
1660 | static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) |
1661 | { |
1662 | if (skb_vlan_tag_present(skb)) |
1663 | packet->vlan_ctag = skb_vlan_tag_get(skb); |
1664 | } |
1665 | |
1666 | static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) |
1667 | { |
1668 | int ret; |
1669 | |
1670 | if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1671 | TSO_ENABLE)) |
1672 | return 0; |
1673 | |
	ret = skb_cow_head(skb, 0);
1675 | if (ret) |
1676 | return ret; |
1677 | |
1678 | if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) { |
1679 | packet->header_len = skb_inner_tcp_all_headers(skb); |
1680 | packet->tcp_header_len = inner_tcp_hdrlen(skb); |
1681 | } else { |
1682 | packet->header_len = skb_tcp_all_headers(skb); |
1683 | packet->tcp_header_len = tcp_hdrlen(skb); |
1684 | } |
1685 | packet->tcp_payload_len = skb->len - packet->header_len; |
1686 | packet->mss = skb_shinfo(skb)->gso_size; |
1687 | |
1688 | DBGPR(" packet->header_len=%u\n" , packet->header_len); |
1689 | DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n" , |
1690 | packet->tcp_header_len, packet->tcp_payload_len); |
1691 | DBGPR(" packet->mss=%u\n" , packet->mss); |
1692 | |
1693 | /* Update the number of packets that will ultimately be transmitted |
1694 | * along with the extra bytes for each extra packet |
1695 | */ |
1696 | packet->tx_packets = skb_shinfo(skb)->gso_segs; |
1697 | packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; |
1698 | |
1699 | return 0; |
1700 | } |
1701 | |
1702 | static bool xgbe_is_vxlan(struct sk_buff *skb) |
1703 | { |
1704 | if (!skb->encapsulation) |
1705 | return false; |
1706 | |
1707 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
1708 | return false; |
1709 | |
1710 | switch (skb->protocol) { |
1711 | case htons(ETH_P_IP): |
1712 | if (ip_hdr(skb)->protocol != IPPROTO_UDP) |
1713 | return false; |
1714 | break; |
1715 | |
1716 | case htons(ETH_P_IPV6): |
1717 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) |
1718 | return false; |
1719 | break; |
1720 | |
1721 | default: |
1722 | return false; |
1723 | } |
1724 | |
1725 | if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || |
1726 | skb->inner_protocol != htons(ETH_P_TEB) || |
1727 | (skb_inner_mac_header(skb) - skb_transport_header(skb) != |
1728 | sizeof(struct udphdr) + sizeof(struct vxlanhdr))) |
1729 | return false; |
1730 | |
1731 | return true; |
1732 | } |
1733 | |
1734 | static int xgbe_is_tso(struct sk_buff *skb) |
1735 | { |
1736 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
1737 | return 0; |
1738 | |
1739 | if (!skb_is_gso(skb)) |
1740 | return 0; |
1741 | |
1742 | DBGPR(" TSO packet to be processed\n" ); |
1743 | |
1744 | return 1; |
1745 | } |
1746 | |
1747 | static void xgbe_packet_info(struct xgbe_prv_data *pdata, |
1748 | struct xgbe_ring *ring, struct sk_buff *skb, |
1749 | struct xgbe_packet_data *packet) |
1750 | { |
1751 | skb_frag_t *frag; |
1752 | unsigned int context_desc; |
1753 | unsigned int len; |
1754 | unsigned int i; |
1755 | |
1756 | packet->skb = skb; |
1757 | |
1758 | context_desc = 0; |
1759 | packet->rdesc_count = 0; |
1760 | |
1761 | packet->tx_packets = 1; |
1762 | packet->tx_bytes = skb->len; |
1763 | |
1764 | if (xgbe_is_tso(skb)) { |
1765 | /* TSO requires an extra descriptor if mss is different */ |
1766 | if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { |
1767 | context_desc = 1; |
1768 | packet->rdesc_count++; |
1769 | } |
1770 | |
1771 | /* TSO requires an extra descriptor for TSO header */ |
1772 | packet->rdesc_count++; |
1773 | |
1774 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1775 | TSO_ENABLE, 1); |
1776 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1777 | CSUM_ENABLE, 1); |
1778 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) |
1779 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1780 | CSUM_ENABLE, 1); |
1781 | |
1782 | if (xgbe_is_vxlan(skb)) |
1783 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1784 | VXLAN, 1); |
1785 | |
1786 | if (skb_vlan_tag_present(skb)) { |
1787 | /* VLAN requires an extra descriptor if tag is different */ |
1788 | if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) |
1789 | /* We can share with the TSO context descriptor */ |
1790 | if (!context_desc) { |
1791 | context_desc = 1; |
1792 | packet->rdesc_count++; |
1793 | } |
1794 | |
1795 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1796 | VLAN_CTAG, 1); |
1797 | } |
1798 | |
1799 | if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
1800 | (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) |
1801 | XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
1802 | PTP, 1); |
1803 | |
1804 | for (len = skb_headlen(skb); len;) { |
1805 | packet->rdesc_count++; |
1806 | len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); |
1807 | } |
1808 | |
1809 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1810 | frag = &skb_shinfo(skb)->frags[i]; |
1811 | for (len = skb_frag_size(frag); len; ) { |
1812 | packet->rdesc_count++; |
1813 | len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); |
1814 | } |
1815 | } |
1816 | } |
1817 | |
1818 | static int xgbe_open(struct net_device *netdev) |
1819 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1821 | int ret; |
1822 | |
1823 | /* Create the various names based on netdev name */ |
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));
1832 | |
1833 | /* Create workqueues */ |
1834 | pdata->dev_workqueue = |
1835 | create_singlethread_workqueue(netdev_name(netdev)); |
1836 | if (!pdata->dev_workqueue) { |
1837 | netdev_err(dev: netdev, format: "device workqueue creation failed\n" ); |
1838 | return -ENOMEM; |
1839 | } |
1840 | |
1841 | pdata->an_workqueue = |
1842 | create_singlethread_workqueue(pdata->an_name); |
1843 | if (!pdata->an_workqueue) { |
1844 | netdev_err(dev: netdev, format: "phy workqueue creation failed\n" ); |
1845 | ret = -ENOMEM; |
1846 | goto err_dev_wq; |
1847 | } |
1848 | |
1849 | /* Reset the phy settings */ |
1850 | ret = xgbe_phy_reset(pdata); |
1851 | if (ret) |
1852 | goto err_an_wq; |
1853 | |
1854 | /* Enable the clocks */ |
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1864 | goto err_sysclk; |
1865 | } |
1866 | |
1867 | INIT_WORK(&pdata->service_work, xgbe_service); |
1868 | INIT_WORK(&pdata->restart_work, xgbe_restart); |
1869 | INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); |
1870 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); |
1871 | |
1872 | ret = xgbe_alloc_memory(pdata); |
1873 | if (ret) |
1874 | goto err_ptpclk; |
1875 | |
1876 | ret = xgbe_start(pdata); |
1877 | if (ret) |
1878 | goto err_mem; |
1879 | |
	clear_bit(XGBE_DOWN, &pdata->dev_state);
1881 | |
1882 | return 0; |
1883 | |
1884 | err_mem: |
1885 | xgbe_free_memory(pdata); |
1886 | |
1887 | err_ptpclk: |
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);
1898 | |
1899 | return ret; |
1900 | } |
1901 | |
1902 | static int xgbe_close(struct net_device *netdev) |
1903 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1905 | |
1906 | /* Stop the device */ |
1907 | xgbe_stop(pdata); |
1908 | |
1909 | xgbe_free_memory(pdata); |
1910 | |
1911 | /* Disable the clocks */ |
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);
1914 | |
	destroy_workqueue(pdata->an_workqueue);

	destroy_workqueue(pdata->dev_workqueue);
1918 | |
	set_bit(XGBE_DOWN, &pdata->dev_state);
1920 | |
1921 | return 0; |
1922 | } |
1923 | |
1924 | static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) |
1925 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1927 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
1928 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
1929 | struct xgbe_channel *channel; |
1930 | struct xgbe_ring *ring; |
1931 | struct xgbe_packet_data *packet; |
1932 | struct netdev_queue *txq; |
1933 | netdev_tx_t ret; |
1934 | |
1935 | DBGPR("-->xgbe_xmit: skb->len = %d\n" , skb->len); |
1936 | |
1937 | channel = pdata->channel[skb->queue_mapping]; |
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1939 | ring = channel->tx_ring; |
1940 | packet = &ring->packet_data; |
1941 | |
1942 | ret = NETDEV_TX_OK; |
1943 | |
1944 | if (skb->len == 0) { |
1945 | netif_err(pdata, tx_err, netdev, |
1946 | "empty skb received from stack\n" ); |
1947 | dev_kfree_skb_any(skb); |
1948 | goto tx_netdev_return; |
1949 | } |
1950 | |
1951 | /* Calculate preliminary packet info */ |
1952 | memset(packet, 0, sizeof(*packet)); |
1953 | xgbe_packet_info(pdata, ring, skb, packet); |
1954 | |
1955 | /* Check that there are enough descriptors available */ |
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1957 | if (ret) |
1958 | goto tx_netdev_return; |
1959 | |
1960 | ret = xgbe_prep_tso(skb, packet); |
1961 | if (ret) { |
1962 | netif_err(pdata, tx_err, netdev, |
1963 | "error processing TSO packet\n" ); |
1964 | dev_kfree_skb_any(skb); |
1965 | goto tx_netdev_return; |
1966 | } |
1967 | xgbe_prep_vlan(skb, packet); |
1968 | |
1969 | if (!desc_if->map_tx_skb(channel, skb)) { |
1970 | dev_kfree_skb_any(skb); |
1971 | goto tx_netdev_return; |
1972 | } |
1973 | |
1974 | xgbe_prep_tx_tstamp(pdata, skb, packet); |
1975 | |
1976 | /* Report on the actual number of bytes (to be) sent */ |
	netdev_tx_sent_queue(txq, packet->tx_bytes);
1978 | |
1979 | /* Configure required descriptor fields for transmission */ |
1980 | hw_if->dev_xmit(channel); |
1981 | |
1982 | if (netif_msg_pktdata(pdata)) |
1983 | xgbe_print_pkt(netdev, skb, true); |
1984 | |
1985 | /* Stop the queue in advance if there may not be enough descriptors */ |
1986 | xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); |
1987 | |
1988 | ret = NETDEV_TX_OK; |
1989 | |
1990 | tx_netdev_return: |
1991 | return ret; |
1992 | } |
1993 | |
1994 | static void xgbe_set_rx_mode(struct net_device *netdev) |
1995 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1997 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
1998 | |
1999 | DBGPR("-->xgbe_set_rx_mode\n" ); |
2000 | |
2001 | hw_if->config_rx_mode(pdata); |
2002 | |
2003 | DBGPR("<--xgbe_set_rx_mode\n" ); |
2004 | } |
2005 | |
2006 | static int xgbe_set_mac_address(struct net_device *netdev, void *addr) |
2007 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2009 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2010 | struct sockaddr *saddr = addr; |
2011 | |
2012 | DBGPR("-->xgbe_set_mac_address\n" ); |
2013 | |
	if (!is_valid_ether_addr(saddr->sa_data))
2015 | return -EADDRNOTAVAIL; |
2016 | |
	eth_hw_addr_set(netdev, saddr->sa_data);
2018 | |
2019 | hw_if->set_mac_address(pdata, netdev->dev_addr); |
2020 | |
2021 | DBGPR("<--xgbe_set_mac_address\n" ); |
2022 | |
2023 | return 0; |
2024 | } |
2025 | |
2026 | static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) |
2027 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2029 | int ret; |
2030 | |
2031 | switch (cmd) { |
2032 | case SIOCGHWTSTAMP: |
2033 | ret = xgbe_get_hwtstamp_settings(pdata, ifreq); |
2034 | break; |
2035 | |
2036 | case SIOCSHWTSTAMP: |
2037 | ret = xgbe_set_hwtstamp_settings(pdata, ifreq); |
2038 | break; |
2039 | |
2040 | default: |
2041 | ret = -EOPNOTSUPP; |
2042 | } |
2043 | |
2044 | return ret; |
2045 | } |
2046 | |
2047 | static int xgbe_change_mtu(struct net_device *netdev, int mtu) |
2048 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2050 | int ret; |
2051 | |
2052 | DBGPR("-->xgbe_change_mtu\n" ); |
2053 | |
2054 | ret = xgbe_calc_rx_buf_size(netdev, mtu); |
2055 | if (ret < 0) |
2056 | return ret; |
2057 | |
2058 | pdata->rx_buf_size = ret; |
2059 | netdev->mtu = mtu; |
2060 | |
2061 | xgbe_restart_dev(pdata); |
2062 | |
2063 | DBGPR("<--xgbe_change_mtu\n" ); |
2064 | |
2065 | return 0; |
2066 | } |
2067 | |
2068 | static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
2069 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
2074 | } |
2075 | |
2076 | static void xgbe_get_stats64(struct net_device *netdev, |
2077 | struct rtnl_link_stats64 *s) |
2078 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2080 | struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; |
2081 | |
2082 | DBGPR("-->%s\n" , __func__); |
2083 | |
2084 | pdata->hw_if.read_mmc_stats(pdata); |
2085 | |
2086 | s->rx_packets = pstats->rxframecount_gb; |
2087 | s->rx_bytes = pstats->rxoctetcount_gb; |
2088 | s->rx_errors = pstats->rxframecount_gb - |
2089 | pstats->rxbroadcastframes_g - |
2090 | pstats->rxmulticastframes_g - |
2091 | pstats->rxunicastframes_g; |
2092 | s->multicast = pstats->rxmulticastframes_g; |
2093 | s->rx_length_errors = pstats->rxlengtherror; |
2094 | s->rx_crc_errors = pstats->rxcrcerror; |
2095 | s->rx_fifo_errors = pstats->rxfifooverflow; |
2096 | |
2097 | s->tx_packets = pstats->txframecount_gb; |
2098 | s->tx_bytes = pstats->txoctetcount_gb; |
2099 | s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; |
2100 | s->tx_dropped = netdev->stats.tx_dropped; |
2101 | |
2102 | DBGPR("<--%s\n" , __func__); |
2103 | } |
2104 | |
2105 | static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, |
2106 | u16 vid) |
2107 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2109 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2110 | |
2111 | DBGPR("-->%s\n" , __func__); |
2112 | |
	set_bit(vid, pdata->active_vlans);
2114 | hw_if->update_vlan_hash_table(pdata); |
2115 | |
2116 | DBGPR("<--%s\n" , __func__); |
2117 | |
2118 | return 0; |
2119 | } |
2120 | |
2121 | static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, |
2122 | u16 vid) |
2123 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2125 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2126 | |
2127 | DBGPR("-->%s\n" , __func__); |
2128 | |
	clear_bit(vid, pdata->active_vlans);
2130 | hw_if->update_vlan_hash_table(pdata); |
2131 | |
2132 | DBGPR("<--%s\n" , __func__); |
2133 | |
2134 | return 0; |
2135 | } |
2136 | |
2137 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2138 | static void xgbe_poll_controller(struct net_device *netdev) |
2139 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2141 | struct xgbe_channel *channel; |
2142 | unsigned int i; |
2143 | |
2144 | DBGPR("-->xgbe_poll_controller\n" ); |
2145 | |
2146 | if (pdata->per_channel_irq) { |
2147 | for (i = 0; i < pdata->channel_count; i++) { |
2148 | channel = pdata->channel[i]; |
			xgbe_dma_isr(channel->dma_irq, channel);
2150 | } |
2151 | } else { |
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
2155 | } |
2156 | |
2157 | DBGPR("<--xgbe_poll_controller\n" ); |
2158 | } |
2159 | #endif /* End CONFIG_NET_POLL_CONTROLLER */ |
2160 | |
2161 | static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, |
2162 | void *type_data) |
2163 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2165 | struct tc_mqprio_qopt *mqprio = type_data; |
2166 | u8 tc; |
2167 | |
2168 | if (type != TC_SETUP_QDISC_MQPRIO) |
2169 | return -EOPNOTSUPP; |
2170 | |
2171 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
2172 | tc = mqprio->num_tc; |
2173 | |
2174 | if (tc > pdata->hw_feat.tc_cnt) |
2175 | return -EINVAL; |
2176 | |
2177 | pdata->num_tcs = tc; |
2178 | pdata->hw_if.config_tc(pdata); |
2179 | |
2180 | return 0; |
2181 | } |
2182 | |
2183 | static netdev_features_t xgbe_fix_features(struct net_device *netdev, |
2184 | netdev_features_t features) |
2185 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2187 | netdev_features_t vxlan_base; |
2188 | |
2189 | vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT; |
2190 | |
2191 | if (!pdata->hw_feat.vxn) |
2192 | return features; |
2193 | |
2194 | /* VXLAN CSUM requires VXLAN base */ |
2195 | if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) && |
2196 | !(features & NETIF_F_GSO_UDP_TUNNEL)) { |
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
2199 | features |= NETIF_F_GSO_UDP_TUNNEL; |
2200 | } |
2201 | |
2202 | /* Can't do one without doing the other */ |
2203 | if ((features & vxlan_base) != vxlan_base) { |
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
2206 | features |= vxlan_base; |
2207 | } |
2208 | |
2209 | if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { |
2210 | if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) { |
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
2213 | features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; |
2214 | } |
2215 | } else { |
2216 | if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) { |
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
2219 | features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; |
2220 | } |
2221 | } |
2222 | |
2223 | return features; |
2224 | } |
2225 | |
2226 | static int xgbe_set_features(struct net_device *netdev, |
2227 | netdev_features_t features) |
2228 | { |
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
2230 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2231 | netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; |
2232 | int ret = 0; |
2233 | |
2234 | rxhash = pdata->netdev_features & NETIF_F_RXHASH; |
2235 | rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; |
2236 | rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; |
2237 | rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; |
2238 | |
2239 | if ((features & NETIF_F_RXHASH) && !rxhash) |
2240 | ret = hw_if->enable_rss(pdata); |
2241 | else if (!(features & NETIF_F_RXHASH) && rxhash) |
2242 | ret = hw_if->disable_rss(pdata); |
2243 | if (ret) |
2244 | return ret; |
2245 | |
2246 | if ((features & NETIF_F_RXCSUM) && !rxcsum) |
2247 | hw_if->enable_rx_csum(pdata); |
2248 | else if (!(features & NETIF_F_RXCSUM) && rxcsum) |
2249 | hw_if->disable_rx_csum(pdata); |
2250 | |
2251 | if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) |
2252 | hw_if->enable_rx_vlan_stripping(pdata); |
2253 | else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) |
2254 | hw_if->disable_rx_vlan_stripping(pdata); |
2255 | |
2256 | if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) |
2257 | hw_if->enable_rx_vlan_filtering(pdata); |
2258 | else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) |
2259 | hw_if->disable_rx_vlan_filtering(pdata); |
2260 | |
2261 | pdata->netdev_features = features; |
2262 | |
2263 | DBGPR("<--xgbe_set_features\n" ); |
2264 | |
2265 | return 0; |
2266 | } |
2267 | |
2268 | static netdev_features_t xgbe_features_check(struct sk_buff *skb, |
2269 | struct net_device *netdev, |
2270 | netdev_features_t features) |
2271 | { |
2272 | features = vlan_features_check(skb, features); |
2273 | features = vxlan_features_check(skb, features); |
2274 | |
2275 | return features; |
2276 | } |
2277 | |
2278 | static const struct net_device_ops xgbe_netdev_ops = { |
2279 | .ndo_open = xgbe_open, |
2280 | .ndo_stop = xgbe_close, |
2281 | .ndo_start_xmit = xgbe_xmit, |
2282 | .ndo_set_rx_mode = xgbe_set_rx_mode, |
2283 | .ndo_set_mac_address = xgbe_set_mac_address, |
2284 | .ndo_validate_addr = eth_validate_addr, |
2285 | .ndo_eth_ioctl = xgbe_ioctl, |
2286 | .ndo_change_mtu = xgbe_change_mtu, |
2287 | .ndo_tx_timeout = xgbe_tx_timeout, |
2288 | .ndo_get_stats64 = xgbe_get_stats64, |
2289 | .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, |
2290 | .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid, |
2291 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2292 | .ndo_poll_controller = xgbe_poll_controller, |
2293 | #endif |
2294 | .ndo_setup_tc = xgbe_setup_tc, |
2295 | .ndo_fix_features = xgbe_fix_features, |
2296 | .ndo_set_features = xgbe_set_features, |
2297 | .ndo_features_check = xgbe_features_check, |
2298 | }; |
2299 | |
2300 | const struct net_device_ops *xgbe_get_netdev_ops(void) |
2301 | { |
2302 | return &xgbe_netdev_ops; |
2303 | } |
2304 | |
2305 | static void xgbe_rx_refresh(struct xgbe_channel *channel) |
2306 | { |
2307 | struct xgbe_prv_data *pdata = channel->pdata; |
2308 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2309 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
2310 | struct xgbe_ring *ring = channel->rx_ring; |
2311 | struct xgbe_ring_data *rdata; |
2312 | |
2313 | while (ring->dirty != ring->cur) { |
2314 | rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); |
2315 | |
2316 | /* Reset rdata values */ |
2317 | desc_if->unmap_rdata(pdata, rdata); |
2318 | |
2319 | if (desc_if->map_rx_buffer(pdata, ring, rdata)) |
2320 | break; |
2321 | |
2322 | hw_if->rx_desc_reset(pdata, rdata, ring->dirty); |
2323 | |
2324 | ring->dirty++; |
2325 | } |
2326 | |
2327 | /* Make sure everything is written before the register write */ |
2328 | wmb(); |
2329 | |
2330 | /* Update the Rx Tail Pointer Register with address of |
2331 | * the last cleaned entry */ |
2332 | rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); |
2333 | XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, |
2334 | lower_32_bits(rdata->rdesc_dma)); |
2335 | } |
2336 | |
2337 | static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, |
2338 | struct napi_struct *napi, |
2339 | struct xgbe_ring_data *rdata, |
2340 | unsigned int len) |
2341 | { |
2342 | struct sk_buff *skb; |
2343 | u8 *packet; |
2344 | |
	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
2346 | if (!skb) |
2347 | return NULL; |
2348 | |
2349 | /* Pull in the header buffer which may contain just the header |
2350 | * or the header plus data |
2351 | */ |
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
2355 | |
2356 | packet = page_address(rdata->rx.hdr.pa.pages) + |
2357 | rdata->rx.hdr.pa.pages_offset; |
	skb_copy_to_linear_data(skb, packet, len);
2359 | skb_put(skb, len); |
2360 | |
2361 | return skb; |
2362 | } |
2363 | |
2364 | static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, |
2365 | struct xgbe_packet_data *packet) |
2366 | { |
2367 | /* Always zero if not the first descriptor */ |
2368 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) |
2369 | return 0; |
2370 | |
2371 | /* First descriptor with split header, return header length */ |
2372 | if (rdata->rx.hdr_len) |
2373 | return rdata->rx.hdr_len; |
2374 | |
2375 | /* First descriptor but not the last descriptor and no split header, |
2376 | * so the full buffer was used |
2377 | */ |
2378 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) |
2379 | return rdata->rx.hdr.dma_len; |
2380 | |
2381 | /* First descriptor and last descriptor and no split header, so |
2382 | * calculate how much of the buffer was used |
2383 | */ |
2384 | return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); |
2385 | } |
2386 | |
2387 | static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, |
2388 | struct xgbe_packet_data *packet, |
2389 | unsigned int len) |
2390 | { |
2391 | /* Always the full buffer if not the last descriptor */ |
2392 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) |
2393 | return rdata->rx.buf.dma_len; |
2394 | |
2395 | /* Last descriptor so calculate how much of the buffer was used |
2396 | * for the last bit of data |
2397 | */ |
2398 | return rdata->rx.len - len; |
2399 | } |
2400 | |
2401 | static int xgbe_tx_poll(struct xgbe_channel *channel) |
2402 | { |
2403 | struct xgbe_prv_data *pdata = channel->pdata; |
2404 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2405 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
2406 | struct xgbe_ring *ring = channel->tx_ring; |
2407 | struct xgbe_ring_data *rdata; |
2408 | struct xgbe_ring_desc *rdesc; |
2409 | struct net_device *netdev = pdata->netdev; |
2410 | struct netdev_queue *txq; |
2411 | int processed = 0; |
2412 | unsigned int tx_packets = 0, tx_bytes = 0; |
2413 | unsigned int cur; |
2414 | |
2415 | DBGPR("-->xgbe_tx_poll\n" ); |
2416 | |
2417 | /* Nothing to do if there isn't a Tx ring for this channel */ |
2418 | if (!ring) |
2419 | return 0; |
2420 | |
2421 | cur = ring->cur; |
2422 | |
2423 | /* Be sure we get ring->cur before accessing descriptor data */ |
2424 | smp_rmb(); |
2425 | |
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
2427 | |
2428 | while ((processed < XGBE_TX_DESC_MAX_PROC) && |
2429 | (ring->dirty != cur)) { |
2430 | rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); |
2431 | rdesc = rdata->rdesc; |
2432 | |
2433 | if (!hw_if->tx_complete(rdesc)) |
2434 | break; |
2435 | |
2436 | /* Make sure descriptor fields are read after reading the OWN |
2437 | * bit */ |
2438 | dma_rmb(); |
2439 | |
2440 | if (netif_msg_tx_done(pdata)) |
2441 | xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); |
2442 | |
2443 | if (hw_if->is_last_desc(rdesc)) { |
2444 | tx_packets += rdata->tx.packets; |
2445 | tx_bytes += rdata->tx.bytes; |
2446 | } |
2447 | |
2448 | /* Free the SKB and reset the descriptor for re-use */ |
2449 | desc_if->unmap_rdata(pdata, rdata); |
2450 | hw_if->tx_desc_reset(rdata); |
2451 | |
2452 | processed++; |
2453 | ring->dirty++; |
2454 | } |
2455 | |
2456 | if (!processed) |
2457 | return 0; |
2458 | |
	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
2460 | |
2461 | if ((ring->tx.queue_stopped == 1) && |
2462 | (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { |
2463 | ring->tx.queue_stopped = 0; |
		netif_tx_wake_queue(txq);
2465 | } |
2466 | |
2467 | DBGPR("<--xgbe_tx_poll: processed=%d\n" , processed); |
2468 | |
2469 | return processed; |
2470 | } |
2471 | |
2472 | static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) |
2473 | { |
2474 | struct xgbe_prv_data *pdata = channel->pdata; |
2475 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
2476 | struct xgbe_ring *ring = channel->rx_ring; |
2477 | struct xgbe_ring_data *rdata; |
2478 | struct xgbe_packet_data *packet; |
2479 | struct net_device *netdev = pdata->netdev; |
2480 | struct napi_struct *napi; |
2481 | struct sk_buff *skb; |
2482 | struct skb_shared_hwtstamps *hwtstamps; |
2483 | unsigned int last, error, context_next, context; |
2484 | unsigned int len, buf1_len, buf2_len, max_len; |
2485 | unsigned int received = 0; |
2486 | int packet_count = 0; |
2487 | |
2488 | DBGPR("-->xgbe_rx_poll: budget=%d\n" , budget); |
2489 | |
2490 | /* Nothing to do if there isn't a Rx ring for this channel */ |
2491 | if (!ring) |
2492 | return 0; |
2493 | |
2494 | last = 0; |
2495 | context_next = 0; |
2496 | |
2497 | napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; |
2498 | |
2499 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
2500 | packet = &ring->packet_data; |
2501 | while (packet_count < budget) { |
2502 | DBGPR(" cur = %d\n" , ring->cur); |
2503 | |
2504 | /* First time in loop see if we need to restore state */ |
2505 | if (!received && rdata->state_saved) { |
2506 | skb = rdata->state.skb; |
2507 | error = rdata->state.error; |
2508 | len = rdata->state.len; |
2509 | } else { |
2510 | memset(packet, 0, sizeof(*packet)); |
2511 | skb = NULL; |
2512 | error = 0; |
2513 | len = 0; |
2514 | } |
2515 | |
2516 | read_again: |
2517 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
2518 | |
2519 | if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3)) |
2520 | xgbe_rx_refresh(channel); |
2521 | |
2522 | if (hw_if->dev_read(channel)) |
2523 | break; |
2524 | |
2525 | received++; |
2526 | ring->cur++; |
2527 | |
2528 | last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
2529 | LAST); |
2530 | context_next = XGMAC_GET_BITS(packet->attributes, |
2531 | RX_PACKET_ATTRIBUTES, |
2532 | CONTEXT_NEXT); |
2533 | context = XGMAC_GET_BITS(packet->attributes, |
2534 | RX_PACKET_ATTRIBUTES, |
2535 | CONTEXT); |
2536 | |
2537 | /* Earlier error, just drain the remaining data */ |
2538 | if ((!last || context_next) && error) |
2539 | goto read_again; |
2540 | |
2541 | if (error || packet->errors) { |
2542 | if (packet->errors) |
2543 | netif_err(pdata, rx_err, netdev, |
2544 | "error in received packet\n" ); |
2545 | dev_kfree_skb(skb); |
2546 | goto next_packet; |
2547 | } |
2548 | |
2549 | if (!context) { |
2550 | /* Get the data length in the descriptor buffers */ |
2551 | buf1_len = xgbe_rx_buf1_len(rdata, packet); |
2552 | len += buf1_len; |
2553 | buf2_len = xgbe_rx_buf2_len(rdata, packet, len); |
2554 | len += buf2_len; |
2555 | |
2556 | if (buf2_len > rdata->rx.buf.dma_len) { |
2557 | /* Hardware inconsistency within the descriptors |
2558 | * that has resulted in a length underflow. |
2559 | */ |
2560 | error = 1; |
2561 | goto skip_data; |
2562 | } |
2563 | |
2564 | if (!skb) { |
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
2567 | if (!skb) { |
2568 | error = 1; |
2569 | goto skip_data; |
2570 | } |
2571 | } |
2572 | |
2573 | if (buf2_len) { |
				dma_sync_single_range_for_cpu(pdata->dev,
							      rdata->rx.buf.dma_base,
							      rdata->rx.buf.dma_off,
							      rdata->rx.buf.dma_len,
							      DMA_FROM_DEVICE);
2579 | |
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
2585 | rdata->rx.buf.pa.pages = NULL; |
2586 | } |
2587 | } |
2588 | |
2589 | skip_data: |
2590 | if (!last || context_next) |
2591 | goto read_again; |
2592 | |
2593 | if (!skb || error) { |
2594 | dev_kfree_skb(skb); |
2595 | goto next_packet; |
2596 | } |
2597 | |
2598 | /* Be sure we don't exceed the configured MTU */ |
2599 | max_len = netdev->mtu + ETH_HLEN; |
2600 | if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
2601 | (skb->protocol == htons(ETH_P_8021Q))) |
2602 | max_len += VLAN_HLEN; |
2603 | |
2604 | if (skb->len > max_len) { |
2605 | netif_err(pdata, rx_err, netdev, |
2606 | "packet length exceeds configured MTU\n" ); |
2607 | dev_kfree_skb(skb); |
2608 | goto next_packet; |
2609 | } |
2610 | |
2611 | if (netif_msg_pktdata(pdata)) |
2612 | xgbe_print_pkt(netdev, skb, false); |
2613 | |
2614 | skb_checksum_none_assert(skb); |
2615 | if (XGMAC_GET_BITS(packet->attributes, |
2616 | RX_PACKET_ATTRIBUTES, CSUM_DONE)) |
2617 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2618 | |
2619 | if (XGMAC_GET_BITS(packet->attributes, |
2620 | RX_PACKET_ATTRIBUTES, TNP)) { |
2621 | skb->encapsulation = 1; |
2622 | |
2623 | if (XGMAC_GET_BITS(packet->attributes, |
2624 | RX_PACKET_ATTRIBUTES, TNPCSUM_DONE)) |
2625 | skb->csum_level = 1; |
2626 | } |
2627 | |
2628 | if (XGMAC_GET_BITS(packet->attributes, |
2629 | RX_PACKET_ATTRIBUTES, VLAN_CTAG)) |
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);
2632 | |
2633 | if (XGMAC_GET_BITS(packet->attributes, |
2634 | RX_PACKET_ATTRIBUTES, RX_TSTAMP)) { |
2635 | u64 nsec; |
2636 | |
			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
2639 | hwtstamps = skb_hwtstamps(skb); |
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
2641 | } |
2642 | |
2643 | if (XGMAC_GET_BITS(packet->attributes, |
2644 | RX_PACKET_ATTRIBUTES, RSS_HASH)) |
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);
2647 | |
2648 | skb->dev = netdev; |
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
2651 | |
2652 | napi_gro_receive(napi, skb); |
2653 | |
2654 | next_packet: |
2655 | packet_count++; |
2656 | } |
2657 | |
2658 | /* Check if we need to save state before leaving */ |
2659 | if (received && (!last || context_next)) { |
2660 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
2661 | rdata->state_saved = 1; |
2662 | rdata->state.skb = skb; |
2663 | rdata->state.len = len; |
2664 | rdata->state.error = error; |
2665 | } |
2666 | |
2667 | DBGPR("<--xgbe_rx_poll: packet_count = %d\n" , packet_count); |
2668 | |
2669 | return packet_count; |
2670 | } |
2671 | |
2672 | static int xgbe_one_poll(struct napi_struct *napi, int budget) |
2673 | { |
2674 | struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, |
2675 | napi); |
2676 | struct xgbe_prv_data *pdata = channel->pdata; |
2677 | int processed = 0; |
2678 | |
2679 | DBGPR("-->xgbe_one_poll: budget=%d\n" , budget); |
2680 | |
2681 | /* Cleanup Tx ring first */ |
2682 | xgbe_tx_poll(channel); |
2683 | |
2684 | /* Process Rx ring next */ |
2685 | processed = xgbe_rx_poll(channel, budget); |
2686 | |
2687 | /* If we processed everything, we are done */ |
	if ((processed < budget) && napi_complete_done(napi, processed)) {
2689 | /* Enable Tx and Rx interrupts */ |
2690 | if (pdata->channel_irq_mode) |
2691 | xgbe_enable_rx_tx_int(pdata, channel); |
2692 | else |
			enable_irq(channel->dma_irq);
2694 | } |
2695 | |
2696 | DBGPR("<--xgbe_one_poll: received = %d\n" , processed); |
2697 | |
2698 | return processed; |
2699 | } |
2700 | |
2701 | static int xgbe_all_poll(struct napi_struct *napi, int budget) |
2702 | { |
2703 | struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, |
2704 | napi); |
2705 | struct xgbe_channel *channel; |
2706 | int ring_budget; |
2707 | int processed, last_processed; |
2708 | unsigned int i; |
2709 | |
2710 | DBGPR("-->xgbe_all_poll: budget=%d\n" , budget); |
2711 | |
2712 | processed = 0; |
2713 | ring_budget = budget / pdata->rx_ring_count; |
2714 | do { |
2715 | last_processed = processed; |
2716 | |
2717 | for (i = 0; i < pdata->channel_count; i++) { |
2718 | channel = pdata->channel[i]; |
2719 | |
2720 | /* Cleanup Tx ring first */ |
2721 | xgbe_tx_poll(channel); |
2722 | |
2723 | /* Process Rx ring next */ |
2724 | if (ring_budget > (budget - processed)) |
2725 | ring_budget = budget - processed; |
			processed += xgbe_rx_poll(channel, ring_budget);
2727 | } |
2728 | } while ((processed < budget) && (processed != last_processed)); |
2729 | |
2730 | /* If we processed everything, we are done */ |
	if ((processed < budget) && napi_complete_done(napi, processed)) {
2732 | /* Enable Tx and Rx interrupts */ |
2733 | xgbe_enable_rx_tx_ints(pdata); |
2734 | } |
2735 | |
2736 | DBGPR("<--xgbe_all_poll: received = %d\n" , processed); |
2737 | |
2738 | return processed; |
2739 | } |
2740 | |
2741 | void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, |
2742 | unsigned int idx, unsigned int count, unsigned int flag) |
2743 | { |
2744 | struct xgbe_ring_data *rdata; |
2745 | struct xgbe_ring_desc *rdesc; |
2746 | |
2747 | while (count--) { |
2748 | rdata = XGBE_GET_DESC_DATA(ring, idx); |
2749 | rdesc = rdata->rdesc; |
2750 | netdev_dbg(pdata->netdev, |
2751 | "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n" , idx, |
2752 | (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE" , |
2753 | le32_to_cpu(rdesc->desc0), |
2754 | le32_to_cpu(rdesc->desc1), |
2755 | le32_to_cpu(rdesc->desc2), |
2756 | le32_to_cpu(rdesc->desc3)); |
2757 | idx++; |
2758 | } |
2759 | } |
2760 | |
2761 | void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, |
2762 | unsigned int idx) |
2763 | { |
2764 | struct xgbe_ring_data *rdata; |
2765 | struct xgbe_ring_desc *rdesc; |
2766 | |
2767 | rdata = XGBE_GET_DESC_DATA(ring, idx); |
2768 | rdesc = rdata->rdesc; |
2769 | netdev_dbg(pdata->netdev, |
2770 | "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n" , |
2771 | idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), |
2772 | le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); |
2773 | } |
2774 | |
2775 | void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) |
2776 | { |
2777 | struct ethhdr *eth = (struct ethhdr *)skb->data; |
2778 | unsigned char buffer[128]; |
2779 | unsigned int i; |
2780 | |
2781 | netdev_dbg(netdev, "\n************** SKB dump ****************\n" ); |
2782 | |
2783 | netdev_dbg(netdev, "%s packet of %d bytes\n" , |
2784 | (tx_rx ? "TX" : "RX" ), skb->len); |
2785 | |
2786 | netdev_dbg(netdev, "Dst MAC addr: %pM\n" , eth->h_dest); |
2787 | netdev_dbg(netdev, "Src MAC addr: %pM\n" , eth->h_source); |
2788 | netdev_dbg(netdev, "Protocol: %#06x\n" , ntohs(eth->h_proto)); |
2789 | |
2790 | for (i = 0; i < skb->len; i += 32) { |
2791 | unsigned int len = min(skb->len - i, 32U); |
2792 | |
		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
2796 | } |
2797 | |
2798 | netdev_dbg(netdev, "\n************** SKB dump ****************\n" ); |
2799 | } |
2800 | |