1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /******************************************************************************* |
3 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. |
4 | ST Ethernet IPs are built around a Synopsys IP Core. |
5 | |
6 | Copyright(C) 2007-2011 STMicroelectronics Ltd |
7 | |
8 | |
9 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
10 | |
11 | Documentation available at: |
12 | http://www.stlinux.com |
13 | Support available at: |
14 | https://bugzilla.stlinux.com/ |
15 | *******************************************************************************/ |
16 | |
17 | #include <linux/clk.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/ip.h> |
21 | #include <linux/tcp.h> |
22 | #include <linux/skbuff.h> |
23 | #include <linux/ethtool.h> |
24 | #include <linux/if_ether.h> |
25 | #include <linux/crc32.h> |
26 | #include <linux/mii.h> |
27 | #include <linux/if.h> |
28 | #include <linux/if_vlan.h> |
29 | #include <linux/dma-mapping.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/pm_runtime.h> |
32 | #include <linux/prefetch.h> |
33 | #include <linux/pinctrl/consumer.h> |
34 | #ifdef CONFIG_DEBUG_FS |
35 | #include <linux/debugfs.h> |
36 | #include <linux/seq_file.h> |
37 | #endif /* CONFIG_DEBUG_FS */ |
38 | #include <linux/net_tstamp.h> |
39 | #include <linux/phylink.h> |
40 | #include <linux/udp.h> |
41 | #include <linux/bpf_trace.h> |
42 | #include <net/page_pool/helpers.h> |
43 | #include <net/pkt_cls.h> |
44 | #include <net/xdp_sock_drv.h> |
45 | #include "stmmac_ptp.h" |
46 | #include "stmmac.h" |
47 | #include "stmmac_xdp.h" |
48 | #include <linux/reset.h> |
49 | #include <linux/of_mdio.h> |
50 | #include "dwmac1000.h" |
51 | #include "dwxgmac2.h" |
52 | #include "hwif.h" |
53 | |
54 | /* As long as the interface is active, we keep the timestamping counter enabled |
55 |  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 | * (clock jumps) when changing timestamping settings at runtime. |
57 | */ |
58 | #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ |
59 | PTP_TCR_TSCTRLSSR) |
60 | |
61 | #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) |
62 | #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) |
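
/* Worked example (illustrative, assuming SMP_CACHE_BYTES == 64):
 * STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16) = ALIGN(1536, 16) = 1536,
 * i.e. sizes are rounded up to a value that is both cache-line and
 * 16-byte aligned.
 */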
63 | |
64 | /* Module parameters */ |
65 | #define TX_TIMEO 5000 |
66 | static int watchdog = TX_TIMEO; |
67 | module_param(watchdog, int, 0644); |
68 | MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 | |
70 | static int debug = -1; |
71 | module_param(debug, int, 0644); |
72 | MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 | |
74 | static int phyaddr = -1; |
75 | module_param(phyaddr, int, 0444); |
76 | MODULE_PARM_DESC(phyaddr, "Physical device address");
77 | |
78 | #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) |
79 | #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) |
80 | |
81 | /* Limit to make sure XDP TX and slow path can coexist */ |
82 | #define STMMAC_XSK_TX_BUDGET_MAX 256 |
83 | #define STMMAC_TX_XSK_AVAIL 16 |
84 | #define STMMAC_RX_FILL_BATCH 16 |
85 | |
86 | #define STMMAC_XDP_PASS 0 |
87 | #define STMMAC_XDP_CONSUMED BIT(0) |
88 | #define STMMAC_XDP_TX BIT(1) |
89 | #define STMMAC_XDP_REDIRECT BIT(2) |
90 | |
91 | static int flow_ctrl = FLOW_AUTO; |
92 | module_param(flow_ctrl, int, 0644); |
93 | MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 | |
95 | static int pause = PAUSE_TIME; |
96 | module_param(pause, int, 0644); |
97 | MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 | |
99 | #define TC_DEFAULT 64 |
100 | static int tc = TC_DEFAULT; |
101 | module_param(tc, int, 0644); |
102 | MODULE_PARM_DESC(tc, "DMA threshold control value");
103 | |
104 | #define DEFAULT_BUFSIZE 1536 |
105 | static int buf_sz = DEFAULT_BUFSIZE; |
106 | module_param(buf_sz, int, 0644); |
107 | MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 | |
109 | #define STMMAC_RX_COPYBREAK 256 |
110 | |
111 | static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | |
112 | NETIF_MSG_LINK | NETIF_MSG_IFUP | |
113 | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); |
114 | |
115 | #define STMMAC_DEFAULT_LPI_TIMER 1000 |
116 | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; |
117 | module_param(eee_timer, int, 0644); |
118 | MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 | #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) |
120 | |
121 | /* By default the driver uses ring mode to manage TX and RX descriptors,
122 |  * but the user can force chain mode instead of ring mode.
123 |  */
124 | static unsigned int chain_mode; |
125 | module_param(chain_mode, int, 0444); |
126 | MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
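
/* Usage sketch (illustrative): the module parameters above can be set at
 * load time, e.g. "modprobe stmmac chain_mode=1 eee_timer=2000", or on the
 * kernel command line as "stmmac.eee_timer=2000" when the driver is built in.
 */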
127 | |
128 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); |
129 | /* For MSI interrupts handling */ |
130 | static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); |
131 | static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); |
132 | static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); |
133 | static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); |
134 | static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); |
135 | static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); |
136 | static void stmmac_reset_queues_param(struct stmmac_priv *priv); |
137 | static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); |
138 | static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); |
139 | static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, |
140 | u32 rxmode, u32 chan); |
141 | |
142 | #ifdef CONFIG_DEBUG_FS |
143 | static const struct net_device_ops stmmac_netdev_ops; |
144 | static void stmmac_init_fs(struct net_device *dev); |
145 | static void stmmac_exit_fs(struct net_device *dev); |
146 | #endif |
147 | |
148 | #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) |
149 | |
150 | int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) |
151 | { |
152 | int ret = 0; |
153 | |
154 | if (enabled) { |
155 | 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 | if (ret) |
157 | return ret; |
158 | 		ret = clk_prepare_enable(priv->plat->pclk);
159 | if (ret) { |
160 | 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 | return ret; |
162 | } |
163 | if (priv->plat->clks_config) { |
164 | ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); |
165 | if (ret) { |
166 | 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 | 				clk_disable_unprepare(priv->plat->pclk);
168 | return ret; |
169 | } |
170 | } |
171 | } else { |
172 | 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 | 		clk_disable_unprepare(priv->plat->pclk);
174 | if (priv->plat->clks_config) |
175 | priv->plat->clks_config(priv->plat->bsp_priv, enabled); |
176 | } |
177 | |
178 | return ret; |
179 | } |
180 | EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); |
181 | |
182 | /** |
183 | * stmmac_verify_args - verify the driver parameters. |
184 |  * Description: it checks the driver parameters and sets a default in case of
185 | * errors. |
186 | */ |
187 | static void stmmac_verify_args(void) |
188 | { |
189 | if (unlikely(watchdog < 0)) |
190 | watchdog = TX_TIMEO; |
191 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
192 | buf_sz = DEFAULT_BUFSIZE; |
193 | if (unlikely(flow_ctrl > 1)) |
194 | flow_ctrl = FLOW_AUTO; |
195 | else if (likely(flow_ctrl < 0)) |
196 | flow_ctrl = FLOW_OFF; |
197 | if (unlikely((pause < 0) || (pause > 0xffff))) |
198 | pause = PAUSE_TIME; |
199 | if (eee_timer < 0) |
200 | eee_timer = STMMAC_DEFAULT_LPI_TIMER; |
201 | } |
202 | |
203 | static void __stmmac_disable_all_queues(struct stmmac_priv *priv) |
204 | { |
205 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
206 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; |
207 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); |
208 | u32 queue; |
209 | |
210 | for (queue = 0; queue < maxq; queue++) { |
211 | struct stmmac_channel *ch = &priv->channel[queue]; |
212 | |
213 | if (stmmac_xdp_is_enabled(priv) && |
214 | test_bit(queue, priv->af_xdp_zc_qps)) { |
215 | 			napi_disable(&ch->rxtx_napi);
216 | continue; |
217 | } |
218 | |
219 | if (queue < rx_queues_cnt) |
220 | 			napi_disable(&ch->rx_napi);
221 | 		if (queue < tx_queues_cnt)
222 | 			napi_disable(&ch->tx_napi);
223 | } |
224 | } |
225 | |
226 | /** |
227 | * stmmac_disable_all_queues - Disable all queues |
228 | * @priv: driver private structure |
229 | */ |
230 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) |
231 | { |
232 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
233 | struct stmmac_rx_queue *rx_q; |
234 | u32 queue; |
235 | |
236 | /* synchronize_rcu() needed for pending XDP buffers to drain */ |
237 | for (queue = 0; queue < rx_queues_cnt; queue++) { |
238 | rx_q = &priv->dma_conf.rx_queue[queue]; |
239 | if (rx_q->xsk_pool) { |
240 | synchronize_rcu(); |
241 | break; |
242 | } |
243 | } |
244 | |
245 | __stmmac_disable_all_queues(priv); |
246 | } |
247 | |
248 | /** |
249 | * stmmac_enable_all_queues - Enable all queues |
250 | * @priv: driver private structure |
251 | */ |
252 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) |
253 | { |
254 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
255 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; |
256 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); |
257 | u32 queue; |
258 | |
259 | for (queue = 0; queue < maxq; queue++) { |
260 | struct stmmac_channel *ch = &priv->channel[queue]; |
261 | |
262 | if (stmmac_xdp_is_enabled(priv) && |
263 | test_bit(queue, priv->af_xdp_zc_qps)) { |
264 | 			napi_enable(&ch->rxtx_napi);
265 | continue; |
266 | } |
267 | |
268 | if (queue < rx_queues_cnt) |
269 | 			napi_enable(&ch->rx_napi);
270 | 		if (queue < tx_queues_cnt)
271 | 			napi_enable(&ch->tx_napi);
272 | } |
273 | } |
274 | |
275 | static void stmmac_service_event_schedule(struct stmmac_priv *priv) |
276 | { |
277 | if (!test_bit(STMMAC_DOWN, &priv->state) && |
278 | 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 | 		queue_work(priv->wq, &priv->service_task);
280 | } |
281 | |
282 | static void stmmac_global_err(struct stmmac_priv *priv) |
283 | { |
284 | 	netif_carrier_off(priv->dev);
285 | 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 | stmmac_service_event_schedule(priv); |
287 | } |
288 | |
289 | /** |
290 | * stmmac_clk_csr_set - dynamically set the MDC clock |
291 | * @priv: driver private structure |
292 | * Description: this is to dynamically set the MDC clock according to the csr |
293 | * clock input. |
294 | * Note: |
295 | * If a specific clk_csr value is passed from the platform |
296 | * this means that the CSR Clock Range selection cannot be |
297 | * changed at run-time and it is fixed (as reported in the driver |
298 |  * documentation). Otherwise the driver will try to set the MDC
299 | * clock dynamically according to the actual clock input. |
300 | */ |
301 | static void stmmac_clk_csr_set(struct stmmac_priv *priv) |
302 | { |
303 | u32 clk_rate; |
304 | |
305 | 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 | |
307 | /* Platform provided default clk_csr would be assumed valid |
308 | * for all other cases except for the below mentioned ones. |
309 | * For values higher than the IEEE 802.3 specified frequency |
310 | 	 * we cannot estimate the proper divider, as the frequency
311 | 	 * of clk_csr_i is not known. So we do not change the default
312 | * divider. |
313 | */ |
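	/* Illustrative example: a clk_rate of 75 MHz falls in the 60-100 MHz
	 * range below and selects STMMAC_CSR_60_100M, from which the MAC
	 * derives an MDC clock under the 2.5 MHz limit of IEEE 802.3
	 * clause 22.
	 */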
314 | if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { |
315 | if (clk_rate < CSR_F_35M) |
316 | priv->clk_csr = STMMAC_CSR_20_35M; |
317 | else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) |
318 | priv->clk_csr = STMMAC_CSR_35_60M; |
319 | else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) |
320 | priv->clk_csr = STMMAC_CSR_60_100M; |
321 | else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) |
322 | priv->clk_csr = STMMAC_CSR_100_150M; |
323 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) |
324 | priv->clk_csr = STMMAC_CSR_150_250M; |
325 | else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) |
326 | priv->clk_csr = STMMAC_CSR_250_300M; |
327 | } |
328 | |
329 | if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { |
330 | if (clk_rate > 160000000) |
331 | priv->clk_csr = 0x03; |
332 | else if (clk_rate > 80000000) |
333 | priv->clk_csr = 0x02; |
334 | else if (clk_rate > 40000000) |
335 | priv->clk_csr = 0x01; |
336 | else |
337 | priv->clk_csr = 0; |
338 | } |
339 | |
340 | if (priv->plat->has_xgmac) { |
341 | if (clk_rate > 400000000) |
342 | priv->clk_csr = 0x5; |
343 | else if (clk_rate > 350000000) |
344 | priv->clk_csr = 0x4; |
345 | else if (clk_rate > 300000000) |
346 | priv->clk_csr = 0x3; |
347 | else if (clk_rate > 250000000) |
348 | priv->clk_csr = 0x2; |
349 | else if (clk_rate > 150000000) |
350 | priv->clk_csr = 0x1; |
351 | else |
352 | priv->clk_csr = 0x0; |
353 | } |
354 | } |
355 | |
356 | static void print_pkt(unsigned char *buf, int len) |
357 | { |
358 | 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 | 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 | } |
361 | |
362 | static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) |
363 | { |
364 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
365 | u32 avail; |
366 | |
367 | if (tx_q->dirty_tx > tx_q->cur_tx) |
368 | avail = tx_q->dirty_tx - tx_q->cur_tx - 1; |
369 | else |
370 | avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; |
371 | |
372 | return avail; |
373 | } |
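
/* Worked example (illustrative): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5 (the common, non-wrapped case), the else branch above gives
 * avail = 512 - 10 + 5 - 1 = 506 descriptors. One slot is always kept
 * unused so that cur_tx == dirty_tx unambiguously means "ring empty".
 */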
374 | |
375 | /** |
376 | * stmmac_rx_dirty - Get RX queue dirty |
377 | * @priv: driver private structure |
378 | * @queue: RX queue index |
379 | */ |
380 | static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) |
381 | { |
382 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
383 | u32 dirty; |
384 | |
385 | if (rx_q->dirty_rx <= rx_q->cur_rx) |
386 | dirty = rx_q->cur_rx - rx_q->dirty_rx; |
387 | else |
388 | dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; |
389 | |
390 | return dirty; |
391 | } |
392 | |
393 | static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) |
394 | { |
395 | int tx_lpi_timer; |
396 | |
397 | /* Clear/set the SW EEE timer flag based on LPI ET enablement */ |
398 | priv->eee_sw_timer_en = en ? 0 : 1; |
399 | tx_lpi_timer = en ? priv->tx_lpi_timer : 0; |
400 | stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); |
401 | } |
402 | |
403 | /** |
404 |  * stmmac_enable_eee_mode - check and enter LPI mode
405 |  * @priv: driver private structure
406 |  * Description: this function verifies and enters LPI mode in case of
407 | * EEE. |
408 | */ |
409 | static int stmmac_enable_eee_mode(struct stmmac_priv *priv) |
410 | { |
411 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
412 | u32 queue; |
413 | |
414 | /* check if all TX queues have the work finished */ |
415 | for (queue = 0; queue < tx_cnt; queue++) { |
416 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
417 | |
418 | if (tx_q->dirty_tx != tx_q->cur_tx) |
419 | return -EBUSY; /* still unfinished work */ |
420 | } |
421 | |
422 | /* Check and enter in LPI mode */ |
423 | if (!priv->tx_path_in_lpi_mode) |
424 | stmmac_set_eee_mode(priv, priv->hw, |
425 | priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); |
426 | return 0; |
427 | } |
428 | |
429 | /** |
430 | * stmmac_disable_eee_mode - disable and exit from LPI mode |
431 | * @priv: driver private structure |
432 |  * Description: this function exits and disables EEE when the LPI
433 |  * state is true. It is called from the xmit path.
434 | */ |
435 | void stmmac_disable_eee_mode(struct stmmac_priv *priv) |
436 | { |
437 | if (!priv->eee_sw_timer_en) { |
438 | 		stmmac_lpi_entry_timer_config(priv, 0);
439 | return; |
440 | } |
441 | |
442 | stmmac_reset_eee_mode(priv, priv->hw); |
443 | 	del_timer_sync(&priv->eee_ctrl_timer);
444 | priv->tx_path_in_lpi_mode = false; |
445 | } |
446 | |
447 | /** |
448 | * stmmac_eee_ctrl_timer - EEE TX SW timer. |
449 | * @t: timer_list struct containing private info |
450 | * Description: |
451 |  * if there is no data transfer and we are not already in the LPI state,
452 |  * then the MAC transmitter can be moved to the LPI state.
453 | */ |
454 | static void stmmac_eee_ctrl_timer(struct timer_list *t) |
455 | { |
456 | struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); |
457 | |
458 | if (stmmac_enable_eee_mode(priv)) |
459 | 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 | } |
461 | |
462 | /** |
463 | * stmmac_eee_init - init EEE |
464 | * @priv: driver private structure |
465 | * Description: |
466 |  * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 |  * can also manage EEE, this function enables the LPI state and starts
468 |  * the related timer.
469 | */ |
470 | bool stmmac_eee_init(struct stmmac_priv *priv) |
471 | { |
472 | int eee_tw_timer = priv->eee_tw_timer; |
473 | |
474 | 	/* Using PCS we cannot deal with the phy registers at this stage
475 | * so we do not support extra feature like EEE. |
476 | */ |
477 | if (priv->hw->pcs == STMMAC_PCS_TBI || |
478 | priv->hw->pcs == STMMAC_PCS_RTBI) |
479 | return false; |
480 | |
481 | /* Check if MAC core supports the EEE feature. */ |
482 | if (!priv->dma_cap.eee) |
483 | return false; |
484 | |
485 | mutex_lock(&priv->lock); |
486 | |
487 | /* Check if it needs to be deactivated */ |
488 | if (!priv->eee_active) { |
489 | if (priv->eee_enabled) { |
490 | 			netdev_dbg(priv->dev, "disable EEE\n");
491 | 			stmmac_lpi_entry_timer_config(priv, 0);
492 | 			del_timer_sync(&priv->eee_ctrl_timer);
493 | stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); |
494 | if (priv->hw->xpcs) |
495 | 				xpcs_config_eee(priv->hw->xpcs,
496 | 						priv->plat->mult_fact_100ns,
497 | 						false);
498 | } |
499 | 		mutex_unlock(&priv->lock);
500 | return false; |
501 | } |
502 | |
503 | if (priv->eee_active && !priv->eee_enabled) { |
504 | timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); |
505 | stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, |
506 | eee_tw_timer); |
507 | if (priv->hw->xpcs) |
508 | 			xpcs_config_eee(priv->hw->xpcs,
509 | 					priv->plat->mult_fact_100ns,
510 | 					true);
511 | } |
512 | |
513 | if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { |
514 | 		del_timer_sync(&priv->eee_ctrl_timer);
515 | priv->tx_path_in_lpi_mode = false; |
516 | 		stmmac_lpi_entry_timer_config(priv, 1);
517 | } else { |
518 | 		stmmac_lpi_entry_timer_config(priv, 0);
519 | 		mod_timer(&priv->eee_ctrl_timer,
520 | STMMAC_LPI_T(priv->tx_lpi_timer)); |
521 | } |
522 | |
523 | 	mutex_unlock(&priv->lock);
524 | 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 | return true; |
526 | } |
527 | |
528 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps |
529 | * @priv: driver private structure |
530 | * @p : descriptor pointer |
531 | * @skb : the socket buffer |
532 | * Description : |
533 |  * This function reads the timestamp from the descriptor, performs some
534 |  * sanity checks and then passes it to the stack.
535 | */ |
536 | static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, |
537 | struct dma_desc *p, struct sk_buff *skb) |
538 | { |
539 | struct skb_shared_hwtstamps shhwtstamp; |
540 | bool found = false; |
541 | u64 ns = 0; |
542 | |
543 | if (!priv->hwts_tx_en) |
544 | return; |
545 | |
546 | /* exit if skb doesn't support hw tstamp */ |
547 | if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) |
548 | return; |
549 | |
550 | /* check tx tstamp status */ |
551 | if (stmmac_get_tx_timestamp_status(priv, p)) { |
552 | stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); |
553 | found = true; |
554 | } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { |
555 | found = true; |
556 | } |
557 | |
558 | if (found) { |
559 | ns -= priv->plat->cdc_error_adj; |
560 | |
561 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
562 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
563 | |
564 | 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 | 		/* pass tstamp to stack */
566 | 		skb_tstamp_tx(skb, &shhwtstamp);
567 | } |
568 | } |
569 | |
570 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps |
571 | * @priv: driver private structure |
572 | * @p : descriptor pointer |
573 | * @np : next descriptor pointer |
574 | * @skb : the socket buffer |
575 | * Description : |
576 |  * This function reads the received packet's timestamp from the descriptor
577 |  * and passes it to the stack. It also performs some sanity checks.
578 | */ |
579 | static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, |
580 | struct dma_desc *np, struct sk_buff *skb) |
581 | { |
582 | struct skb_shared_hwtstamps *shhwtstamp = NULL; |
583 | struct dma_desc *desc = p; |
584 | u64 ns = 0; |
585 | |
586 | if (!priv->hwts_rx_en) |
587 | return; |
588 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
589 | if (priv->plat->has_gmac4 || priv->plat->has_xgmac) |
590 | desc = np; |
591 | |
592 | /* Check if timestamp is available */ |
593 | if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { |
594 | stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); |
595 | |
596 | ns -= priv->plat->cdc_error_adj; |
597 | |
598 | 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 | shhwtstamp = skb_hwtstamps(skb); |
600 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
601 | shhwtstamp->hwtstamp = ns_to_ktime(ns); |
602 | } else { |
603 | 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 | } |
605 | } |
606 | |
607 | /** |
608 | * stmmac_hwtstamp_set - control hardware timestamping. |
609 | * @dev: device pointer. |
610 | * @ifr: An IOCTL specific structure, that can contain a pointer to |
611 | * a proprietary structure used to pass information to the driver. |
612 | * Description: |
613 | * This function configures the MAC to enable/disable both outgoing(TX) |
614 | * and incoming(RX) packets time stamping based on user input. |
615 | * Return Value: |
616 | * 0 on success and an appropriate -ve integer on failure. |
617 | */ |
618 | static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) |
619 | { |
620 | struct stmmac_priv *priv = netdev_priv(dev); |
621 | struct hwtstamp_config config; |
622 | u32 ptp_v2 = 0; |
623 | u32 tstamp_all = 0; |
624 | u32 ptp_over_ipv4_udp = 0; |
625 | u32 ptp_over_ipv6_udp = 0; |
626 | u32 ptp_over_ethernet = 0; |
627 | u32 snap_type_sel = 0; |
628 | u32 ts_master_en = 0; |
629 | u32 ts_event_en = 0; |
630 | |
631 | if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { |
632 | 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 | priv->hwts_tx_en = 0; |
634 | priv->hwts_rx_en = 0; |
635 | |
636 | return -EOPNOTSUPP; |
637 | } |
638 | |
639 | 	if (copy_from_user(&config, ifr->ifr_data,
640 | 			   sizeof(config)))
641 | return -EFAULT; |
642 | |
643 | 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 | __func__, config.flags, config.tx_type, config.rx_filter); |
645 | |
646 | if (config.tx_type != HWTSTAMP_TX_OFF && |
647 | config.tx_type != HWTSTAMP_TX_ON) |
648 | return -ERANGE; |
649 | |
650 | if (priv->adv_ts) { |
651 | switch (config.rx_filter) { |
652 | case HWTSTAMP_FILTER_NONE: |
653 | /* time stamp no incoming packet at all */ |
654 | config.rx_filter = HWTSTAMP_FILTER_NONE; |
655 | break; |
656 | |
657 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
658 | /* PTP v1, UDP, any kind of event packet */ |
659 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
660 | /* 'xmac' hardware can support Sync, Pdelay_Req and |
661 | 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 | 			 * This leaves Delay_Req timestamps out.
663 | * Enable all events *and* general purpose message |
664 | * timestamping |
665 | */ |
666 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
667 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
668 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
669 | break; |
670 | |
671 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
672 | /* PTP v1, UDP, Sync packet */ |
673 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; |
674 | /* take time stamp for SYNC messages only */ |
675 | ts_event_en = PTP_TCR_TSEVNTENA; |
676 | |
677 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
678 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
679 | break; |
680 | |
681 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
682 | /* PTP v1, UDP, Delay_req packet */ |
683 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; |
684 | /* take time stamp for Delay_Req messages only */ |
685 | ts_master_en = PTP_TCR_TSMSTRENA; |
686 | ts_event_en = PTP_TCR_TSEVNTENA; |
687 | |
688 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
689 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
690 | break; |
691 | |
692 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
693 | /* PTP v2, UDP, any kind of event packet */ |
694 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; |
695 | ptp_v2 = PTP_TCR_TSVER2ENA; |
696 | /* take time stamp for all event messages */ |
697 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
698 | |
699 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
700 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
701 | break; |
702 | |
703 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
704 | /* PTP v2, UDP, Sync packet */ |
705 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; |
706 | ptp_v2 = PTP_TCR_TSVER2ENA; |
707 | /* take time stamp for SYNC messages only */ |
708 | ts_event_en = PTP_TCR_TSEVNTENA; |
709 | |
710 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
711 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
712 | break; |
713 | |
714 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
715 | /* PTP v2, UDP, Delay_req packet */ |
716 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; |
717 | ptp_v2 = PTP_TCR_TSVER2ENA; |
718 | /* take time stamp for Delay_Req messages only */ |
719 | ts_master_en = PTP_TCR_TSMSTRENA; |
720 | ts_event_en = PTP_TCR_TSEVNTENA; |
721 | |
722 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
723 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
724 | break; |
725 | |
726 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
727 | /* PTP v2/802.AS1 any layer, any kind of event packet */ |
728 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
729 | ptp_v2 = PTP_TCR_TSVER2ENA; |
730 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
731 | if (priv->synopsys_id < DWMAC_CORE_4_10) |
732 | ts_event_en = PTP_TCR_TSEVNTENA; |
733 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
734 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
735 | ptp_over_ethernet = PTP_TCR_TSIPENA; |
736 | break; |
737 | |
738 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
739 | /* PTP v2/802.AS1, any layer, Sync packet */ |
740 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; |
741 | ptp_v2 = PTP_TCR_TSVER2ENA; |
742 | /* take time stamp for SYNC messages only */ |
743 | ts_event_en = PTP_TCR_TSEVNTENA; |
744 | |
745 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
746 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
747 | ptp_over_ethernet = PTP_TCR_TSIPENA; |
748 | break; |
749 | |
750 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
751 | /* PTP v2/802.AS1, any layer, Delay_req packet */ |
752 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; |
753 | ptp_v2 = PTP_TCR_TSVER2ENA; |
754 | /* take time stamp for Delay_Req messages only */ |
755 | ts_master_en = PTP_TCR_TSMSTRENA; |
756 | ts_event_en = PTP_TCR_TSEVNTENA; |
757 | |
758 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
759 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
760 | ptp_over_ethernet = PTP_TCR_TSIPENA; |
761 | break; |
762 | |
763 | case HWTSTAMP_FILTER_NTP_ALL: |
764 | case HWTSTAMP_FILTER_ALL: |
765 | /* time stamp any incoming packet */ |
766 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
767 | tstamp_all = PTP_TCR_TSENALL; |
768 | break; |
769 | |
770 | default: |
771 | return -ERANGE; |
772 | } |
773 | } else { |
774 | switch (config.rx_filter) { |
775 | case HWTSTAMP_FILTER_NONE: |
776 | config.rx_filter = HWTSTAMP_FILTER_NONE; |
777 | break; |
778 | default: |
779 | /* PTP v1, UDP, any kind of event packet */ |
780 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
781 | break; |
782 | } |
783 | } |
784 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); |
785 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; |
786 | |
787 | priv->systime_flags = STMMAC_HWTS_ACTIVE; |
788 | |
789 | if (priv->hwts_tx_en || priv->hwts_rx_en) { |
790 | priv->systime_flags |= tstamp_all | ptp_v2 | |
791 | ptp_over_ethernet | ptp_over_ipv6_udp | |
792 | ptp_over_ipv4_udp | ts_event_en | |
793 | ts_master_en | snap_type_sel; |
794 | } |
795 | |
796 | stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); |
797 | |
798 | memcpy(&priv->tstamp_config, &config, sizeof(config)); |
799 | |
800 | 	return copy_to_user(ifr->ifr_data, &config,
801 | 			    sizeof(config)) ? -EFAULT : 0;
802 | } |
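
/* Usage sketch from userspace (illustrative, error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * where fd is any open AF_INET socket; on return cfg.rx_filter reflects
 * the (possibly upgraded) filter actually programmed.
 */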
803 | |
804 | /** |
805 | * stmmac_hwtstamp_get - read hardware timestamping. |
806 | * @dev: device pointer. |
807 | * @ifr: An IOCTL specific structure, that can contain a pointer to |
808 | * a proprietary structure used to pass information to the driver. |
809 | * Description: |
810 |  * This function obtains the current hardware timestamping settings
811 | * as requested. |
812 | */ |
813 | static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) |
814 | { |
815 | struct stmmac_priv *priv = netdev_priv(dev); |
816 | struct hwtstamp_config *config = &priv->tstamp_config; |
817 | |
818 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
819 | return -EOPNOTSUPP; |
820 | |
821 | 	return copy_to_user(ifr->ifr_data, config,
822 | 			    sizeof(*config)) ? -EFAULT : 0;
823 | } |
824 | |
825 | /** |
826 | * stmmac_init_tstamp_counter - init hardware timestamping counter |
827 | * @priv: driver private structure |
828 | * @systime_flags: timestamping flags |
829 | * Description: |
830 | * Initialize hardware counter for packet timestamping. |
831 | * This is valid as long as the interface is open and not suspended. |
832 |  * It is rerun after resuming from suspend, in which case the timestamping
833 |  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 | */ |
835 | int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) |
836 | { |
837 | bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
838 | struct timespec64 now; |
839 | u32 sec_inc = 0; |
840 | u64 temp = 0; |
841 | |
842 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
843 | return -EOPNOTSUPP; |
844 | |
845 | stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); |
846 | priv->systime_flags = systime_flags; |
847 | |
848 | /* program Sub Second Increment reg */ |
849 | stmmac_config_sub_second_increment(priv, priv->ptpaddr, |
850 | priv->plat->clk_ptp_rate, |
851 | xmac, &sec_inc); |
852 | 	temp = div_u64(1000000000ULL, sec_inc);
853 | |
854 | /* Store sub second increment for later use */ |
855 | priv->sub_second_inc = sec_inc; |
856 | |
857 | 	/* Calculate the default addend value:
858 | 	 * formula is:
859 | 	 * addend = (2^32)/freq_div_ratio;
860 | 	 * where freq_div_ratio = 1e9ns/sec_inc
861 | 	 */
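	/* Worked example (illustrative values only): with sec_inc = 40 ns,
	 * freq_div_ratio = 1e9 / 40 = 25 MHz; for a 50 MHz clk_ptp_rate this
	 * yields addend = 2^32 * 25e6 / 50e6 = 0x80000000, i.e. the 32-bit
	 * accumulator overflows (and the counter ticks) every second cycle.
	 */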
862 | temp = (u64)(temp << 32); |
863 | 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 | stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); |
865 | |
866 | /* initialize system time */ |
867 | 	ktime_get_real_ts64(&now);
868 | |
869 | /* lower 32 bits of tv_sec are safe until y2106 */ |
870 | stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); |
871 | |
872 | return 0; |
873 | } |
874 | EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); |
875 | |
876 | /** |
877 | * stmmac_init_ptp - init PTP |
878 | * @priv: driver private structure |
879 | * Description: this is to verify if the HW supports the PTPv1 or PTPv2. |
880 | * This is done by looking at the HW cap. register. |
881 | * This function also registers the ptp driver. |
882 | */ |
883 | static int stmmac_init_ptp(struct stmmac_priv *priv) |
884 | { |
885 | bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
886 | int ret; |
887 | |
888 | if (priv->plat->ptp_clk_freq_config) |
889 | priv->plat->ptp_clk_freq_config(priv); |
890 | |
891 | ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); |
892 | if (ret) |
893 | return ret; |
894 | |
895 | priv->adv_ts = 0; |
896 | /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ |
897 | if (xmac && priv->dma_cap.atime_stamp) |
898 | priv->adv_ts = 1; |
899 | /* Dwmac 3.x core with extend_desc can support adv_ts */ |
900 | else if (priv->extend_desc && priv->dma_cap.atime_stamp) |
901 | priv->adv_ts = 1; |
902 | |
903 | if (priv->dma_cap.time_stamp) |
904 | 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 | |
906 | if (priv->adv_ts) |
907 | 		netdev_info(priv->dev,
908 | 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 | |
910 | priv->hwts_tx_en = 0; |
911 | priv->hwts_rx_en = 0; |
912 | |
913 | if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) |
914 | stmmac_hwtstamp_correct_latency(priv, priv); |
915 | |
916 | return 0; |
917 | } |
918 | |
919 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
920 | { |
921 | 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 | stmmac_ptp_unregister(priv); |
923 | } |
924 | |
925 | /** |
926 | * stmmac_mac_flow_ctrl - Configure flow control in all queues |
927 | * @priv: driver private structure |
928 | * @duplex: duplex passed to the next function |
929 | * Description: It is used for configuring the flow control in all queues |
930 | */ |
931 | static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) |
932 | { |
933 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
934 | |
935 | stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, |
936 | priv->pause, tx_cnt); |
937 | } |
938 | |
939 | static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, |
940 | phy_interface_t interface) |
941 | { |
942 | struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
943 | |
944 | if (priv->hw->xpcs) |
945 | return &priv->hw->xpcs->pcs; |
946 | |
947 | if (priv->hw->lynx_pcs) |
948 | return priv->hw->lynx_pcs; |
949 | |
950 | return NULL; |
951 | } |
952 | |
953 | static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, |
954 | const struct phylink_link_state *state) |
955 | { |
956 | /* Nothing to do, xpcs_config() handles everything */ |
957 | } |
958 | |
959 | static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) |
960 | { |
961 | struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; |
962 | enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; |
963 | enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; |
964 | bool *hs_enable = &fpe_cfg->hs_enable; |
965 | |
966 | if (is_up && *hs_enable) { |
967 | stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, |
968 | MPACKET_VERIFY); |
969 | } else { |
970 | *lo_state = FPE_STATE_OFF; |
971 | *lp_state = FPE_STATE_OFF; |
972 | } |
973 | } |
974 | |
975 | static void stmmac_mac_link_down(struct phylink_config *config, |
976 | unsigned int mode, phy_interface_t interface) |
977 | { |
978 | struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
979 | |
980 | stmmac_mac_set(priv, priv->ioaddr, false); |
981 | priv->eee_active = false; |
982 | priv->tx_lpi_enabled = false; |
983 | priv->eee_enabled = stmmac_eee_init(priv); |
984 | stmmac_set_eee_pls(priv, priv->hw, false); |
985 | |
986 | if (priv->dma_cap.fpesel) |
987 | 		stmmac_fpe_link_state_handle(priv, false);
988 | } |
989 | |
990 | static void stmmac_mac_link_up(struct phylink_config *config, |
991 | struct phy_device *phy, |
992 | unsigned int mode, phy_interface_t interface, |
993 | int speed, int duplex, |
994 | bool tx_pause, bool rx_pause) |
995 | { |
996 | struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); |
997 | u32 old_ctrl, ctrl; |
998 | |
999 | if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && |
1000 | priv->plat->serdes_powerup) |
1001 | priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); |
1002 | |
1003 | 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 | ctrl = old_ctrl & ~priv->hw->link.speed_mask; |
1005 | |
1006 | if (interface == PHY_INTERFACE_MODE_USXGMII) { |
1007 | switch (speed) { |
1008 | case SPEED_10000: |
1009 | ctrl |= priv->hw->link.xgmii.speed10000; |
1010 | break; |
1011 | case SPEED_5000: |
1012 | ctrl |= priv->hw->link.xgmii.speed5000; |
1013 | break; |
1014 | case SPEED_2500: |
1015 | ctrl |= priv->hw->link.xgmii.speed2500; |
1016 | break; |
1017 | default: |
1018 | return; |
1019 | } |
1020 | } else if (interface == PHY_INTERFACE_MODE_XLGMII) { |
1021 | switch (speed) { |
1022 | case SPEED_100000: |
1023 | ctrl |= priv->hw->link.xlgmii.speed100000; |
1024 | break; |
1025 | case SPEED_50000: |
1026 | ctrl |= priv->hw->link.xlgmii.speed50000; |
1027 | break; |
1028 | case SPEED_40000: |
1029 | ctrl |= priv->hw->link.xlgmii.speed40000; |
1030 | break; |
1031 | case SPEED_25000: |
1032 | ctrl |= priv->hw->link.xlgmii.speed25000; |
1033 | break; |
1034 | case SPEED_10000: |
1035 | ctrl |= priv->hw->link.xgmii.speed10000; |
1036 | break; |
1037 | case SPEED_2500: |
1038 | ctrl |= priv->hw->link.speed2500; |
1039 | break; |
1040 | case SPEED_1000: |
1041 | ctrl |= priv->hw->link.speed1000; |
1042 | break; |
1043 | default: |
1044 | return; |
1045 | } |
1046 | } else { |
1047 | switch (speed) { |
1048 | case SPEED_2500: |
1049 | ctrl |= priv->hw->link.speed2500; |
1050 | break; |
1051 | case SPEED_1000: |
1052 | ctrl |= priv->hw->link.speed1000; |
1053 | break; |
1054 | case SPEED_100: |
1055 | ctrl |= priv->hw->link.speed100; |
1056 | break; |
1057 | case SPEED_10: |
1058 | ctrl |= priv->hw->link.speed10; |
1059 | break; |
1060 | default: |
1061 | return; |
1062 | } |
1063 | } |
1064 | |
1065 | priv->speed = speed; |
1066 | |
1067 | if (priv->plat->fix_mac_speed) |
1068 | priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); |
1069 | |
1070 | if (!duplex) |
1071 | ctrl &= ~priv->hw->link.duplex; |
1072 | else |
1073 | ctrl |= priv->hw->link.duplex; |
1074 | |
1075 | /* Flow Control operation */ |
1076 | if (rx_pause && tx_pause) |
1077 | priv->flow_ctrl = FLOW_AUTO; |
1078 | else if (rx_pause && !tx_pause) |
1079 | priv->flow_ctrl = FLOW_RX; |
1080 | else if (!rx_pause && tx_pause) |
1081 | priv->flow_ctrl = FLOW_TX; |
1082 | else |
1083 | priv->flow_ctrl = FLOW_OFF; |
1084 | |
1085 | stmmac_mac_flow_ctrl(priv, duplex); |
1086 | |
1087 | if (ctrl != old_ctrl) |
1088 | 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 | |
1090 | stmmac_mac_set(priv, priv->ioaddr, true); |
1091 | if (phy && priv->dma_cap.eee) { |
1092 | priv->eee_active = |
1093 | 			phy_init_eee(phy, !(priv->plat->flags &
1094 | STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; |
1095 | priv->eee_enabled = stmmac_eee_init(priv); |
1096 | priv->tx_lpi_enabled = priv->eee_enabled; |
1097 | stmmac_set_eee_pls(priv, priv->hw, true); |
1098 | } |
1099 | |
1100 | if (priv->dma_cap.fpesel) |
1101 | 		stmmac_fpe_link_state_handle(priv, true);
1102 | |
1103 | if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) |
1104 | stmmac_hwtstamp_correct_latency(priv, priv); |
1105 | } |
1106 | |
1107 | static const struct phylink_mac_ops stmmac_phylink_mac_ops = { |
1108 | .mac_select_pcs = stmmac_mac_select_pcs, |
1109 | .mac_config = stmmac_mac_config, |
1110 | .mac_link_down = stmmac_mac_link_down, |
1111 | .mac_link_up = stmmac_mac_link_up, |
1112 | }; |
1113 | |
1114 | /** |
1115 | * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported |
1116 | * @priv: driver private structure |
1117 | * Description: this is to verify if the HW supports the PCS. |
1118 | * Physical Coding Sublayer (PCS) interface that can be used when the MAC is |
1119 | * configured for the TBI, RTBI, or SGMII PHY interface. |
1120 | */ |
1121 | static void stmmac_check_pcs_mode(struct stmmac_priv *priv) |
1122 | { |
1123 | int interface = priv->plat->mac_interface; |
1124 | |
1125 | if (priv->dma_cap.pcs) { |
1126 | if ((interface == PHY_INTERFACE_MODE_RGMII) || |
1127 | (interface == PHY_INTERFACE_MODE_RGMII_ID) || |
1128 | (interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1129 | (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { |
1130 | 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 | priv->hw->pcs = STMMAC_PCS_RGMII; |
1132 | } else if (interface == PHY_INTERFACE_MODE_SGMII) { |
1133 | 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 | priv->hw->pcs = STMMAC_PCS_SGMII; |
1135 | } |
1136 | } |
1137 | } |
1138 | |
1139 | /** |
1140 | * stmmac_init_phy - PHY initialization |
1141 | * @dev: net device structure |
1142 | * Description: it initializes the driver's PHY state, and attaches the PHY |
1143 | * to the mac driver. |
1144 | * Return value: |
1145 | * 0 on success |
1146 | */ |
1147 | static int stmmac_init_phy(struct net_device *dev) |
1148 | { |
1149 | struct stmmac_priv *priv = netdev_priv(dev); |
1150 | struct fwnode_handle *phy_fwnode; |
1151 | struct fwnode_handle *fwnode; |
1152 | int ret; |
1153 | |
1154 | 	if (!phylink_expects_phy(priv->phylink))
1155 | return 0; |
1156 | |
1157 | fwnode = priv->plat->port_node; |
1158 | if (!fwnode) |
1159 | fwnode = dev_fwnode(priv->device); |
1160 | |
1161 | if (fwnode) |
1162 | phy_fwnode = fwnode_get_phy_node(fwnode); |
1163 | else |
1164 | phy_fwnode = NULL; |
1165 | |
1166 | 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 | 	 * parse it manually
1168 | */ |
1169 | 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 | int addr = priv->plat->phy_addr; |
1171 | struct phy_device *phydev; |
1172 | |
1173 | if (addr < 0) { |
1174 | 			netdev_err(priv->dev, "no phy found\n");
1175 | return -ENODEV; |
1176 | } |
1177 | |
1178 | 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 | if (!phydev) { |
1180 | 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 | return -ENODEV; |
1182 | } |
1183 | |
1184 | ret = phylink_connect_phy(priv->phylink, phydev); |
1185 | } else { |
1186 | 		fwnode_handle_put(phy_fwnode);
1187 | 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 | } |
1189 | |
1190 | if (!priv->plat->pmt) { |
1191 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
1192 | |
1193 | phylink_ethtool_get_wol(priv->phylink, &wol); |
1194 | 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 | 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 | } |
1197 | |
1198 | return ret; |
1199 | } |
1200 | |
1201 | static int stmmac_phy_setup(struct stmmac_priv *priv) |
1202 | { |
1203 | struct stmmac_mdio_bus_data *mdio_bus_data; |
1204 | int mode = priv->plat->phy_interface; |
1205 | struct fwnode_handle *fwnode; |
1206 | struct phylink *phylink; |
1207 | int max_speed; |
1208 | |
1209 | priv->phylink_config.dev = &priv->dev->dev; |
1210 | priv->phylink_config.type = PHYLINK_NETDEV; |
1211 | priv->phylink_config.mac_managed_pm = true; |
1212 | |
1213 | mdio_bus_data = priv->plat->mdio_bus_data; |
1214 | if (mdio_bus_data) |
1215 | priv->phylink_config.ovr_an_inband = |
1216 | mdio_bus_data->xpcs_an_inband; |
1217 | |
1218 | /* Set the platform/firmware specified interface mode. Note, phylink |
1219 | * deals with the PHY interface mode, not the MAC interface mode. |
1220 | */ |
1221 | __set_bit(mode, priv->phylink_config.supported_interfaces); |
1222 | |
1223 | /* If we have an xpcs, it defines which PHY interfaces are supported. */ |
1224 | if (priv->hw->xpcs) |
1225 | 		xpcs_get_interfaces(priv->hw->xpcs,
1226 | 				    priv->phylink_config.supported_interfaces);
1227 | |
1228 | /* Get the MAC specific capabilities */ |
1229 | stmmac_mac_phylink_get_caps(priv); |
1230 | |
1231 | priv->phylink_config.mac_capabilities = priv->hw->link.caps; |
1232 | |
1233 | max_speed = priv->plat->max_speed; |
1234 | if (max_speed) |
1235 | 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1236 | |
1237 | fwnode = priv->plat->port_node; |
1238 | if (!fwnode) |
1239 | fwnode = dev_fwnode(priv->device); |
1240 | |
1241 | phylink = phylink_create(&priv->phylink_config, fwnode, |
1242 | mode, &stmmac_phylink_mac_ops); |
1243 | 	if (IS_ERR(phylink))
1244 | 		return PTR_ERR(phylink);
1245 | |
1246 | priv->phylink = phylink; |
1247 | return 0; |
1248 | } |
1249 | |
1250 | static void stmmac_display_rx_rings(struct stmmac_priv *priv, |
1251 | struct stmmac_dma_conf *dma_conf) |
1252 | { |
1253 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
1254 | unsigned int desc_size; |
1255 | void *head_rx; |
1256 | u32 queue; |
1257 | |
1258 | /* Display RX rings */ |
1259 | for (queue = 0; queue < rx_cnt; queue++) { |
1260 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1261 | |
1262 | 		pr_info("\tRX Queue %u rings\n", queue);
1263 | |
1264 | if (priv->extend_desc) { |
1265 | head_rx = (void *)rx_q->dma_erx; |
1266 | desc_size = sizeof(struct dma_extended_desc); |
1267 | } else { |
1268 | head_rx = (void *)rx_q->dma_rx; |
1269 | desc_size = sizeof(struct dma_desc); |
1270 | } |
1271 | |
1272 | /* Display RX ring */ |
1273 | stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, |
1274 | rx_q->dma_rx_phy, desc_size); |
1275 | } |
1276 | } |
1277 | |
1278 | static void stmmac_display_tx_rings(struct stmmac_priv *priv, |
1279 | struct stmmac_dma_conf *dma_conf) |
1280 | { |
1281 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
1282 | unsigned int desc_size; |
1283 | void *head_tx; |
1284 | u32 queue; |
1285 | |
1286 | /* Display TX rings */ |
1287 | for (queue = 0; queue < tx_cnt; queue++) { |
1288 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1289 | |
1290 | 		pr_info("\tTX Queue %d rings\n", queue);
1291 | |
1292 | if (priv->extend_desc) { |
1293 | head_tx = (void *)tx_q->dma_etx; |
1294 | desc_size = sizeof(struct dma_extended_desc); |
1295 | } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
1296 | head_tx = (void *)tx_q->dma_entx; |
1297 | desc_size = sizeof(struct dma_edesc); |
1298 | } else { |
1299 | head_tx = (void *)tx_q->dma_tx; |
1300 | desc_size = sizeof(struct dma_desc); |
1301 | } |
1302 | |
1303 | stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, |
1304 | tx_q->dma_tx_phy, desc_size); |
1305 | } |
1306 | } |
1307 | |
1308 | static void stmmac_display_rings(struct stmmac_priv *priv, |
1309 | struct stmmac_dma_conf *dma_conf) |
1310 | { |
1311 | /* Display RX ring */ |
1312 | stmmac_display_rx_rings(priv, dma_conf); |
1313 | |
1314 | /* Display TX ring */ |
1315 | stmmac_display_tx_rings(priv, dma_conf); |
1316 | } |
1317 | |
1318 | static int stmmac_set_bfsize(int mtu, int bufsize) |
1319 | { |
1320 | int ret = bufsize; |
1321 | |
1322 | if (mtu >= BUF_SIZE_8KiB) |
1323 | ret = BUF_SIZE_16KiB; |
1324 | else if (mtu >= BUF_SIZE_4KiB) |
1325 | ret = BUF_SIZE_8KiB; |
1326 | else if (mtu >= BUF_SIZE_2KiB) |
1327 | ret = BUF_SIZE_4KiB; |
1328 | else if (mtu > DEFAULT_BUFSIZE) |
1329 | ret = BUF_SIZE_2KiB; |
1330 | else |
1331 | ret = DEFAULT_BUFSIZE; |
1332 | |
1333 | return ret; |
1334 | } |
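
/* Illustrative mapping: a standard 1500-byte MTU keeps DEFAULT_BUFSIZE
 * (1536 bytes), a 1600-byte MTU gets BUF_SIZE_2KiB and a 9000-byte jumbo
 * MTU gets BUF_SIZE_16KiB.
 */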
1335 | |
1336 | /** |
1337 | * stmmac_clear_rx_descriptors - clear RX descriptors |
1338 | * @priv: driver private structure |
1339 | * @dma_conf: structure to take the dma data |
1340 | * @queue: RX queue index |
1341 | * Description: this function is called to clear the RX descriptors |
1342 |  * whether basic or extended descriptors are used.
1343 | */ |
1344 | static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, |
1345 | struct stmmac_dma_conf *dma_conf, |
1346 | u32 queue) |
1347 | { |
1348 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1349 | int i; |
1350 | |
1351 | /* Clear the RX descriptors */ |
1352 | for (i = 0; i < dma_conf->dma_rx_size; i++) |
1353 | if (priv->extend_desc) |
1354 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
1355 | priv->use_riwt, priv->mode, |
1356 | (i == dma_conf->dma_rx_size - 1), |
1357 | dma_conf->dma_buf_sz); |
1358 | else |
1359 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
1360 | priv->use_riwt, priv->mode, |
1361 | (i == dma_conf->dma_rx_size - 1), |
1362 | dma_conf->dma_buf_sz); |
1363 | } |
1364 | |
1365 | /** |
1366 | * stmmac_clear_tx_descriptors - clear tx descriptors |
1367 | * @priv: driver private structure |
1368 | * @dma_conf: structure to take the dma data |
1369 | * @queue: TX queue index. |
1370 | * Description: this function is called to clear the TX descriptors |
1371 |  * whether basic or extended descriptors are used.
1372 | */ |
1373 | static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, |
1374 | struct stmmac_dma_conf *dma_conf, |
1375 | u32 queue) |
1376 | { |
1377 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1378 | int i; |
1379 | |
1380 | /* Clear the TX descriptors */ |
1381 | for (i = 0; i < dma_conf->dma_tx_size; i++) { |
1382 | int last = (i == (dma_conf->dma_tx_size - 1)); |
1383 | struct dma_desc *p; |
1384 | |
1385 | if (priv->extend_desc) |
1386 | p = &tx_q->dma_etx[i].basic; |
1387 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
1388 | p = &tx_q->dma_entx[i].basic; |
1389 | else |
1390 | p = &tx_q->dma_tx[i]; |
1391 | |
1392 | stmmac_init_tx_desc(priv, p, priv->mode, last); |
1393 | } |
1394 | } |
1395 | |
1396 | /** |
1397 | * stmmac_clear_descriptors - clear descriptors |
1398 | * @priv: driver private structure |
1399 | * @dma_conf: structure to take the dma data |
1400 | * Description: this function is called to clear the TX and RX descriptors |
1401 |  * whether basic or extended descriptors are used.
1402 | */ |
1403 | static void stmmac_clear_descriptors(struct stmmac_priv *priv, |
1404 | struct stmmac_dma_conf *dma_conf) |
1405 | { |
1406 | u32 rx_queue_cnt = priv->plat->rx_queues_to_use; |
1407 | u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
1408 | u32 queue; |
1409 | |
1410 | /* Clear the RX descriptors */ |
1411 | for (queue = 0; queue < rx_queue_cnt; queue++) |
1412 | stmmac_clear_rx_descriptors(priv, dma_conf, queue); |
1413 | |
1414 | /* Clear the TX descriptors */ |
1415 | for (queue = 0; queue < tx_queue_cnt; queue++) |
1416 | stmmac_clear_tx_descriptors(priv, dma_conf, queue); |
1417 | } |
1418 | |
1419 | /** |
1420 | * stmmac_init_rx_buffers - init the RX descriptor buffer. |
1421 | * @priv: driver private structure |
1422 | * @dma_conf: structure to take the dma data |
1423 | * @p: descriptor pointer |
1424 | * @i: descriptor index |
1425 | * @flags: gfp flag |
1426 | * @queue: RX queue index |
1427 | * Description: this function is called to allocate a receive buffer, perform |
1428 | * the DMA mapping and init the descriptor. |
1429 | */ |
1430 | static int stmmac_init_rx_buffers(struct stmmac_priv *priv, |
1431 | struct stmmac_dma_conf *dma_conf, |
1432 | struct dma_desc *p, |
1433 | int i, gfp_t flags, u32 queue) |
1434 | { |
1435 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1436 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
1437 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
1438 | |
1439 | if (priv->dma_cap.host_dma_width <= 32) |
1440 | gfp |= GFP_DMA32; |
1441 | |
1442 | if (!buf->page) { |
1443 | 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1444 | if (!buf->page) |
1445 | return -ENOMEM; |
1446 | buf->page_offset = stmmac_rx_offset(priv); |
1447 | } |
1448 | |
1449 | if (priv->sph && !buf->sec_page) { |
1450 | 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1451 | if (!buf->sec_page) |
1452 | return -ENOMEM; |
1453 | |
1454 | 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1455 | stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
1456 | } else { |
1457 | buf->sec_page = NULL; |
1458 | stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
1459 | } |
1460 | |
1461 | 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1462 | |
1463 | stmmac_set_desc_addr(priv, p, buf->addr); |
1464 | if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) |
1465 | stmmac_init_desc3(priv, p); |
1466 | |
1467 | return 0; |
1468 | } |
1469 | |
1470 | /** |
1471 | * stmmac_free_rx_buffer - free RX dma buffers |
1472 | * @priv: private structure |
1473 | * @rx_q: RX queue |
1474 | * @i: buffer index. |
1475 | */ |
1476 | static void stmmac_free_rx_buffer(struct stmmac_priv *priv, |
1477 | struct stmmac_rx_queue *rx_q, |
1478 | int i) |
1479 | { |
1480 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
1481 | |
1482 | if (buf->page) |
1483 | 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1484 | buf->page = NULL; |
1485 | |
1486 | if (buf->sec_page) |
1487 | 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1488 | buf->sec_page = NULL; |
1489 | } |
1490 | |
1491 | /** |
1492 |  * stmmac_free_tx_buffer - free TX dma buffers
1493 |  * @priv: private structure
1494 |  * @dma_conf: structure to take the dma data
1495 |  * @queue: TX queue index
1496 | * @i: buffer index. |
1497 | */ |
1498 | static void stmmac_free_tx_buffer(struct stmmac_priv *priv, |
1499 | struct stmmac_dma_conf *dma_conf, |
1500 | u32 queue, int i) |
1501 | { |
1502 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1503 | |
1504 | if (tx_q->tx_skbuff_dma[i].buf && |
1505 | tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { |
1506 | if (tx_q->tx_skbuff_dma[i].map_as_page) |
1507 | dma_unmap_page(priv->device, |
1508 | tx_q->tx_skbuff_dma[i].buf, |
1509 | tx_q->tx_skbuff_dma[i].len, |
1510 | DMA_TO_DEVICE); |
1511 | else |
1512 | dma_unmap_single(priv->device, |
1513 | tx_q->tx_skbuff_dma[i].buf, |
1514 | tx_q->tx_skbuff_dma[i].len, |
1515 | DMA_TO_DEVICE); |
1516 | } |
1517 | |
1518 | if (tx_q->xdpf[i] && |
1519 | (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || |
1520 | tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { |
1521 | 		xdp_return_frame(tx_q->xdpf[i]);
1522 | tx_q->xdpf[i] = NULL; |
1523 | } |
1524 | |
1525 | if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) |
1526 | tx_q->xsk_frames_done++; |
1527 | |
1528 | if (tx_q->tx_skbuff[i] && |
1529 | tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { |
1530 | 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1531 | tx_q->tx_skbuff[i] = NULL; |
1532 | } |
1533 | |
1534 | tx_q->tx_skbuff_dma[i].buf = 0; |
1535 | tx_q->tx_skbuff_dma[i].map_as_page = false; |
1536 | } |
1537 | |
1538 | /** |
1539 | * dma_free_rx_skbufs - free RX dma buffers |
1540 | * @priv: private structure |
1541 | * @dma_conf: structure to take the dma data |
1542 | * @queue: RX queue index |
1543 | */ |
1544 | static void dma_free_rx_skbufs(struct stmmac_priv *priv, |
1545 | struct stmmac_dma_conf *dma_conf, |
1546 | u32 queue) |
1547 | { |
1548 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1549 | int i; |
1550 | |
1551 | for (i = 0; i < dma_conf->dma_rx_size; i++) |
1552 | stmmac_free_rx_buffer(priv, rx_q, i); |
1553 | } |
1554 | |
1555 | static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, |
1556 | struct stmmac_dma_conf *dma_conf, |
1557 | u32 queue, gfp_t flags) |
1558 | { |
1559 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1560 | int i; |
1561 | |
1562 | for (i = 0; i < dma_conf->dma_rx_size; i++) { |
1563 | struct dma_desc *p; |
1564 | int ret; |
1565 | |
1566 | if (priv->extend_desc) |
1567 | p = &((rx_q->dma_erx + i)->basic); |
1568 | else |
1569 | p = rx_q->dma_rx + i; |
1570 | |
1571 | ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, |
1572 | queue); |
1573 | if (ret) |
1574 | return ret; |
1575 | |
1576 | rx_q->buf_alloc_num++; |
1577 | } |
1578 | |
1579 | return 0; |
1580 | } |
1581 | |
1582 | /** |
1583 | * dma_free_rx_xskbufs - free RX dma buffers from XSK pool |
1584 | * @priv: private structure |
1585 | * @dma_conf: structure to take the dma data |
1586 | * @queue: RX queue index |
1587 | */ |
1588 | static void dma_free_rx_xskbufs(struct stmmac_priv *priv, |
1589 | struct stmmac_dma_conf *dma_conf, |
1590 | u32 queue) |
1591 | { |
1592 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1593 | int i; |
1594 | |
1595 | for (i = 0; i < dma_conf->dma_rx_size; i++) { |
1596 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; |
1597 | |
1598 | if (!buf->xdp) |
1599 | continue; |
1600 | |
xsk_buff_free(buf->xdp);
1602 | buf->xdp = NULL; |
1603 | } |
1604 | } |
1605 | |
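/**
* stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool (per queue)
* @priv: driver private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: fill the RX ring with zero-copy buffers taken from the
* queue's XSK buffer pool and program their DMA addresses into the
* descriptors.
*/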
1606 | static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, |
1607 | struct stmmac_dma_conf *dma_conf, |
1608 | u32 queue) |
1609 | { |
1610 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1611 | int i; |
1612 | |
/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
* in struct xdp_buff_xsk to stash driver specific information. Thus,
* use this macro to make sure there are no size violations.
*/
1617 | XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff); |
1618 | |
1619 | for (i = 0; i < dma_conf->dma_rx_size; i++) { |
1620 | struct stmmac_rx_buffer *buf; |
1621 | dma_addr_t dma_addr; |
1622 | struct dma_desc *p; |
1623 | |
1624 | if (priv->extend_desc) |
1625 | p = (struct dma_desc *)(rx_q->dma_erx + i); |
1626 | else |
1627 | p = rx_q->dma_rx + i; |
1628 | |
1629 | buf = &rx_q->buf_pool[i]; |
1630 | |
buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1632 | if (!buf->xdp) |
1633 | return -ENOMEM; |
1634 | |
dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1636 | stmmac_set_desc_addr(priv, p, dma_addr); |
1637 | rx_q->buf_alloc_num++; |
1638 | } |
1639 | |
1640 | return 0; |
1641 | } |
1642 | |
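/* Return the queue's XSK buffer pool when XDP is enabled and the queue is
* bound to an AF_XDP zero-copy socket, NULL otherwise.
*/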
1643 | static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) |
1644 | { |
1645 | if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) |
1646 | return NULL; |
1647 | |
return xsk_get_pool_from_qid(priv->dev, queue);
1649 | } |
1650 | |
1651 | /** |
1652 | * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) |
1653 | * @priv: driver private structure |
1654 | * @dma_conf: structure to take the dma data |
1655 | * @queue: RX queue index |
1656 | * @flags: gfp flag. |
1657 | * Description: this function initializes the DMA RX descriptors |
1658 | * and allocates the socket buffers. It supports the chained and ring |
1659 | * modes. |
1660 | */ |
1661 | static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, |
1662 | struct stmmac_dma_conf *dma_conf, |
1663 | u32 queue, gfp_t flags) |
1664 | { |
1665 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1666 | int ret; |
1667 | |
1668 | netif_dbg(priv, probe, priv->dev, |
1669 | "(%s) dma_rx_phy=0x%08x\n" , __func__, |
1670 | (u32)rx_q->dma_rx_phy); |
1671 | |
1672 | stmmac_clear_rx_descriptors(priv, dma_conf, queue); |
1673 | |
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675 | |
1676 | rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); |
1677 | |
1678 | if (rx_q->xsk_pool) { |
1679 | WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, |
1680 | MEM_TYPE_XSK_BUFF_POOL, |
1681 | NULL)); |
netdev_info(priv->dev,
"Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
rx_q->queue_index);
xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686 | } else { |
1687 | WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, |
1688 | MEM_TYPE_PAGE_POOL, |
1689 | rx_q->page_pool)); |
netdev_info(priv->dev,
"Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
rx_q->queue_index);
1693 | } |
1694 | |
1695 | if (rx_q->xsk_pool) { |
1696 | /* RX XDP ZC buffer pool may not be populated, e.g. |
1697 | * xdpsock TX-only. |
1698 | */ |
1699 | stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); |
1700 | } else { |
1701 | ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); |
1702 | if (ret < 0) |
1703 | return -ENOMEM; |
1704 | } |
1705 | |
1706 | /* Setup the chained descriptor addresses */ |
1707 | if (priv->mode == STMMAC_CHAIN_MODE) { |
1708 | if (priv->extend_desc) |
1709 | stmmac_mode_init(priv, rx_q->dma_erx, |
1710 | rx_q->dma_rx_phy, |
1711 | dma_conf->dma_rx_size, 1); |
1712 | else |
1713 | stmmac_mode_init(priv, rx_q->dma_rx, |
1714 | rx_q->dma_rx_phy, |
1715 | dma_conf->dma_rx_size, 0); |
1716 | } |
1717 | |
1718 | return 0; |
1719 | } |
1720 | |
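/**
* init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
* @dev: net device structure
* @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: initialize every RX queue in turn, unwinding the queues
* already set up if one of them fails.
*/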
1721 | static int init_dma_rx_desc_rings(struct net_device *dev, |
1722 | struct stmmac_dma_conf *dma_conf, |
1723 | gfp_t flags) |
1724 | { |
1725 | struct stmmac_priv *priv = netdev_priv(dev); |
1726 | u32 rx_count = priv->plat->rx_queues_to_use; |
1727 | int queue; |
1728 | int ret; |
1729 | |
1730 | /* RX INITIALIZATION */ |
1731 | netif_dbg(priv, probe, priv->dev, |
1732 | "SKB addresses:\nskb\t\tskb data\tdma data\n" ); |
1733 | |
1734 | for (queue = 0; queue < rx_count; queue++) { |
1735 | ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); |
1736 | if (ret) |
1737 | goto err_init_rx_buffers; |
1738 | } |
1739 | |
1740 | return 0; |
1741 | |
1742 | err_init_rx_buffers: |
1743 | while (queue >= 0) { |
1744 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1745 | |
1746 | if (rx_q->xsk_pool) |
1747 | dma_free_rx_xskbufs(priv, dma_conf, queue); |
1748 | else |
1749 | dma_free_rx_skbufs(priv, dma_conf, queue); |
1750 | |
1751 | rx_q->buf_alloc_num = 0; |
1752 | rx_q->xsk_pool = NULL; |
1753 | |
1754 | queue--; |
1755 | } |
1756 | |
1757 | return ret; |
1758 | } |
1759 | |
1760 | /** |
1761 | * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) |
1762 | * @priv: driver private structure |
1763 | * @dma_conf: structure to take the dma data |
1764 | * @queue: TX queue index |
1765 | * Description: this function initializes the DMA TX descriptors |
1766 | * and allocates the socket buffers. It supports the chained and ring |
1767 | * modes. |
1768 | */ |
1769 | static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, |
1770 | struct stmmac_dma_conf *dma_conf, |
1771 | u32 queue) |
1772 | { |
1773 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1774 | int i; |
1775 | |
1776 | netif_dbg(priv, probe, priv->dev, |
1777 | "(%s) dma_tx_phy=0x%08x\n" , __func__, |
1778 | (u32)tx_q->dma_tx_phy); |
1779 | |
1780 | /* Setup the chained descriptor addresses */ |
1781 | if (priv->mode == STMMAC_CHAIN_MODE) { |
1782 | if (priv->extend_desc) |
1783 | stmmac_mode_init(priv, tx_q->dma_etx, |
1784 | tx_q->dma_tx_phy, |
1785 | dma_conf->dma_tx_size, 1); |
1786 | else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) |
1787 | stmmac_mode_init(priv, tx_q->dma_tx, |
1788 | tx_q->dma_tx_phy, |
1789 | dma_conf->dma_tx_size, 0); |
1790 | } |
1791 | |
1792 | tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); |
1793 | |
1794 | for (i = 0; i < dma_conf->dma_tx_size; i++) { |
1795 | struct dma_desc *p; |
1796 | |
1797 | if (priv->extend_desc) |
1798 | p = &((tx_q->dma_etx + i)->basic); |
1799 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
1800 | p = &((tx_q->dma_entx + i)->basic); |
1801 | else |
1802 | p = tx_q->dma_tx + i; |
1803 | |
1804 | stmmac_clear_desc(priv, p); |
1805 | |
1806 | tx_q->tx_skbuff_dma[i].buf = 0; |
1807 | tx_q->tx_skbuff_dma[i].map_as_page = false; |
1808 | tx_q->tx_skbuff_dma[i].len = 0; |
1809 | tx_q->tx_skbuff_dma[i].last_segment = false; |
1810 | tx_q->tx_skbuff[i] = NULL; |
1811 | } |
1812 | |
1813 | return 0; |
1814 | } |
1815 | |
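/**
* init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
* @dev: net device structure
* @dma_conf: structure to take the dma data
*/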
1816 | static int init_dma_tx_desc_rings(struct net_device *dev, |
1817 | struct stmmac_dma_conf *dma_conf) |
1818 | { |
1819 | struct stmmac_priv *priv = netdev_priv(dev); |
1820 | u32 tx_queue_cnt; |
1821 | u32 queue; |
1822 | |
1823 | tx_queue_cnt = priv->plat->tx_queues_to_use; |
1824 | |
1825 | for (queue = 0; queue < tx_queue_cnt; queue++) |
1826 | __init_dma_tx_desc_rings(priv, dma_conf, queue); |
1827 | |
1828 | return 0; |
1829 | } |
1830 | |
1831 | /** |
1832 | * init_dma_desc_rings - init the RX/TX descriptor rings |
1833 | * @dev: net device structure |
1834 | * @dma_conf: structure to take the dma data |
1835 | * @flags: gfp flag. |
1836 | * Description: this function initializes the DMA RX/TX descriptors |
1837 | * and allocates the socket buffers. It supports the chained and ring |
1838 | * modes. |
1839 | */ |
1840 | static int init_dma_desc_rings(struct net_device *dev, |
1841 | struct stmmac_dma_conf *dma_conf, |
1842 | gfp_t flags) |
1843 | { |
1844 | struct stmmac_priv *priv = netdev_priv(dev); |
1845 | int ret; |
1846 | |
1847 | ret = init_dma_rx_desc_rings(dev, dma_conf, flags); |
1848 | if (ret) |
1849 | return ret; |
1850 | |
1851 | ret = init_dma_tx_desc_rings(dev, dma_conf); |
1852 | |
1853 | stmmac_clear_descriptors(priv, dma_conf); |
1854 | |
1855 | if (netif_msg_hw(priv)) |
1856 | stmmac_display_rings(priv, dma_conf); |
1857 | |
1858 | return ret; |
1859 | } |
1860 | |
1861 | /** |
1862 | * dma_free_tx_skbufs - free TX dma buffers |
1863 | * @priv: private structure |
1864 | * @dma_conf: structure to take the dma data |
1865 | * @queue: TX queue index |
1866 | */ |
1867 | static void dma_free_tx_skbufs(struct stmmac_priv *priv, |
1868 | struct stmmac_dma_conf *dma_conf, |
1869 | u32 queue) |
1870 | { |
1871 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1872 | int i; |
1873 | |
1874 | tx_q->xsk_frames_done = 0; |
1875 | |
1876 | for (i = 0; i < dma_conf->dma_tx_size; i++) |
1877 | stmmac_free_tx_buffer(priv, dma_conf, queue, i); |
1878 | |
1879 | if (tx_q->xsk_pool && tx_q->xsk_frames_done) { |
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881 | tx_q->xsk_frames_done = 0; |
1882 | tx_q->xsk_pool = NULL; |
1883 | } |
1884 | } |
1885 | |
1886 | /** |
1887 | * stmmac_free_tx_skbufs - free TX skb buffers |
1888 | * @priv: private structure |
1889 | */ |
1890 | static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) |
1891 | { |
1892 | u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
1893 | u32 queue; |
1894 | |
1895 | for (queue = 0; queue < tx_queue_cnt; queue++) |
dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1897 | } |
1898 | |
1899 | /** |
1900 | * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) |
1901 | * @priv: private structure |
1902 | * @dma_conf: structure to take the dma data |
1903 | * @queue: RX queue index |
1904 | */ |
1905 | static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, |
1906 | struct stmmac_dma_conf *dma_conf, |
1907 | u32 queue) |
1908 | { |
1909 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
1910 | |
1911 | /* Release the DMA RX socket buffers */ |
1912 | if (rx_q->xsk_pool) |
1913 | dma_free_rx_xskbufs(priv, dma_conf, queue); |
1914 | else |
1915 | dma_free_rx_skbufs(priv, dma_conf, queue); |
1916 | |
1917 | rx_q->buf_alloc_num = 0; |
1918 | rx_q->xsk_pool = NULL; |
1919 | |
1920 | /* Free DMA regions of consistent memory previously allocated */ |
if (!priv->extend_desc)
dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);

if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
xdp_rxq_info_unreg(&rx_q->xdp_rxq);

kfree(rx_q->buf_pool);
if (rx_q->page_pool)
page_pool_destroy(rx_q->page_pool);
1936 | } |
1937 | |
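/* Free the RX descriptor resources of all queues */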
1938 | static void free_dma_rx_desc_resources(struct stmmac_priv *priv, |
1939 | struct stmmac_dma_conf *dma_conf) |
1940 | { |
1941 | u32 rx_count = priv->plat->rx_queues_to_use; |
1942 | u32 queue; |
1943 | |
1944 | /* Free RX queue resources */ |
1945 | for (queue = 0; queue < rx_count; queue++) |
1946 | __free_dma_rx_desc_resources(priv, dma_conf, queue); |
1947 | } |
1948 | |
1949 | /** |
1950 | * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) |
1951 | * @priv: private structure |
1952 | * @dma_conf: structure to take the dma data |
1953 | * @queue: TX queue index |
1954 | */ |
1955 | static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, |
1956 | struct stmmac_dma_conf *dma_conf, |
1957 | u32 queue) |
1958 | { |
1959 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
1960 | size_t size; |
1961 | void *addr; |
1962 | |
1963 | /* Release the DMA TX socket buffers */ |
1964 | dma_free_tx_skbufs(priv, dma_conf, queue); |
1965 | |
1966 | if (priv->extend_desc) { |
1967 | size = sizeof(struct dma_extended_desc); |
1968 | addr = tx_q->dma_etx; |
1969 | } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { |
1970 | size = sizeof(struct dma_edesc); |
1971 | addr = tx_q->dma_entx; |
1972 | } else { |
1973 | size = sizeof(struct dma_desc); |
1974 | addr = tx_q->dma_tx; |
1975 | } |
1976 | |
1977 | size *= dma_conf->dma_tx_size; |
1978 | |
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
1983 | } |
1984 | |
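/* Free the TX descriptor resources of all queues */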
1985 | static void free_dma_tx_desc_resources(struct stmmac_priv *priv, |
1986 | struct stmmac_dma_conf *dma_conf) |
1987 | { |
1988 | u32 tx_count = priv->plat->tx_queues_to_use; |
1989 | u32 queue; |
1990 | |
1991 | /* Free TX queue resources */ |
1992 | for (queue = 0; queue < tx_count; queue++) |
1993 | __free_dma_tx_desc_resources(priv, dma_conf, queue); |
1994 | } |
1995 | |
1996 | /** |
1997 | * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). |
1998 | * @priv: private structure |
1999 | * @dma_conf: structure to take the dma data |
2000 | * @queue: RX queue index |
* Description: according to which descriptor can be used (extended or basic)
* this function allocates the resources for the RX path. It pre-allocates
* the RX socket buffer in order to allow the zero-copy mechanism.
2005 | */ |
2006 | static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, |
2007 | struct stmmac_dma_conf *dma_conf, |
2008 | u32 queue) |
2009 | { |
2010 | struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; |
2011 | struct stmmac_channel *ch = &priv->channel[queue]; |
2012 | bool xdp_prog = stmmac_xdp_is_enabled(priv); |
2013 | struct page_pool_params pp_params = { 0 }; |
2014 | unsigned int num_pages; |
2015 | unsigned int napi_id; |
2016 | int ret; |
2017 | |
2018 | rx_q->queue_index = queue; |
2019 | rx_q->priv_data = priv; |
2020 | |
2021 | pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; |
2022 | pp_params.pool_size = dma_conf->dma_rx_size; |
2023 | num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); |
2024 | pp_params.order = ilog2(num_pages); |
pp_params.nid = dev_to_node(priv->device);
2026 | pp_params.dev = priv->device; |
2027 | pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; |
2028 | pp_params.offset = stmmac_rx_offset(priv); |
2029 | pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); |
2030 | |
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
ret = PTR_ERR(rx_q->page_pool);
rx_q->page_pool = NULL;
return ret;
2036 | } |
2037 | |
rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
2041 | if (!rx_q->buf_pool) |
2042 | return -ENOMEM; |
2043 | |
2044 | if (priv->extend_desc) { |
rx_q->dma_erx = dma_alloc_coherent(priv->device,
dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
2050 | if (!rx_q->dma_erx) |
2051 | return -ENOMEM; |
2052 | |
2053 | } else { |
rx_q->dma_rx = dma_alloc_coherent(priv->device,
dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
2059 | if (!rx_q->dma_rx) |
2060 | return -ENOMEM; |
2061 | } |
2062 | |
2063 | if (stmmac_xdp_is_enabled(priv) && |
2064 | test_bit(queue, priv->af_xdp_zc_qps)) |
2065 | napi_id = ch->rxtx_napi.napi_id; |
2066 | else |
2067 | napi_id = ch->rx_napi.napi_id; |
2068 | |
ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
rx_q->queue_index,
napi_id);
if (ret) {
netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2074 | return -EINVAL; |
2075 | } |
2076 | |
2077 | return 0; |
2078 | } |
2079 | |
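/* Allocate the RX descriptor resources of all queues, releasing whatever
* has already been allocated if one queue fails.
*/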
2080 | static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, |
2081 | struct stmmac_dma_conf *dma_conf) |
2082 | { |
2083 | u32 rx_count = priv->plat->rx_queues_to_use; |
2084 | u32 queue; |
2085 | int ret; |
2086 | |
2087 | /* RX queues buffers and DMA */ |
2088 | for (queue = 0; queue < rx_count; queue++) { |
2089 | ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); |
2090 | if (ret) |
2091 | goto err_dma; |
2092 | } |
2093 | |
2094 | return 0; |
2095 | |
2096 | err_dma: |
2097 | free_dma_rx_desc_resources(priv, dma_conf); |
2098 | |
2099 | return ret; |
2100 | } |
2101 | |
2102 | /** |
2103 | * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). |
2104 | * @priv: private structure |
2105 | * @dma_conf: structure to take the dma data |
2106 | * @queue: TX queue index |
* Description: according to which descriptor can be used (extended or basic)
* this function allocates the resources for the TX path: the TX descriptor
* ring and the per-entry bookkeeping arrays.
2111 | */ |
2112 | static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, |
2113 | struct stmmac_dma_conf *dma_conf, |
2114 | u32 queue) |
2115 | { |
2116 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; |
2117 | size_t size; |
2118 | void *addr; |
2119 | |
2120 | tx_q->queue_index = queue; |
2121 | tx_q->priv_data = priv; |
2122 | |
tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
2126 | if (!tx_q->tx_skbuff_dma) |
2127 | return -ENOMEM; |
2128 | |
tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
2132 | if (!tx_q->tx_skbuff) |
2133 | return -ENOMEM; |
2134 | |
2135 | if (priv->extend_desc) |
2136 | size = sizeof(struct dma_extended_desc); |
2137 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
2138 | size = sizeof(struct dma_edesc); |
2139 | else |
2140 | size = sizeof(struct dma_desc); |
2141 | |
2142 | size *= dma_conf->dma_tx_size; |
2143 | |
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
2146 | if (!addr) |
2147 | return -ENOMEM; |
2148 | |
2149 | if (priv->extend_desc) |
2150 | tx_q->dma_etx = addr; |
2151 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
2152 | tx_q->dma_entx = addr; |
2153 | else |
2154 | tx_q->dma_tx = addr; |
2155 | |
2156 | return 0; |
2157 | } |
2158 | |
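/* Allocate the TX descriptor resources of all queues, releasing whatever
* has already been allocated if one queue fails.
*/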
2159 | static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, |
2160 | struct stmmac_dma_conf *dma_conf) |
2161 | { |
2162 | u32 tx_count = priv->plat->tx_queues_to_use; |
2163 | u32 queue; |
2164 | int ret; |
2165 | |
2166 | /* TX queues buffers and DMA */ |
2167 | for (queue = 0; queue < tx_count; queue++) { |
2168 | ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); |
2169 | if (ret) |
2170 | goto err_dma; |
2171 | } |
2172 | |
2173 | return 0; |
2174 | |
2175 | err_dma: |
2176 | free_dma_tx_desc_resources(priv, dma_conf); |
2177 | return ret; |
2178 | } |
2179 | |
2180 | /** |
2181 | * alloc_dma_desc_resources - alloc TX/RX resources. |
2182 | * @priv: private structure |
2183 | * @dma_conf: structure to take the dma data |
* Description: according to which descriptor can be used (extended or basic)
* this function allocates the resources for the TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow the zero-copy mechanism.
2188 | */ |
2189 | static int alloc_dma_desc_resources(struct stmmac_priv *priv, |
2190 | struct stmmac_dma_conf *dma_conf) |
2191 | { |
2192 | /* RX Allocation */ |
2193 | int ret = alloc_dma_rx_desc_resources(priv, dma_conf); |
2194 | |
2195 | if (ret) |
2196 | return ret; |
2197 | |
2198 | ret = alloc_dma_tx_desc_resources(priv, dma_conf); |
2199 | |
2200 | return ret; |
2201 | } |
2202 | |
2203 | /** |
2204 | * free_dma_desc_resources - free dma desc resources |
2205 | * @priv: private structure |
2206 | * @dma_conf: structure to take the dma data |
2207 | */ |
2208 | static void free_dma_desc_resources(struct stmmac_priv *priv, |
2209 | struct stmmac_dma_conf *dma_conf) |
2210 | { |
2211 | /* Release the DMA TX socket buffers */ |
2212 | free_dma_tx_desc_resources(priv, dma_conf); |
2213 | |
2214 | /* Release the DMA RX socket buffers later |
2215 | * to ensure all pending XDP_TX buffers are returned. |
2216 | */ |
2217 | free_dma_rx_desc_resources(priv, dma_conf); |
2218 | } |
2219 | |
2220 | /** |
2221 | * stmmac_mac_enable_rx_queues - Enable MAC rx queues |
2222 | * @priv: driver private structure |
2223 | * Description: It is used for enabling the rx queues in the MAC |
2224 | */ |
2225 | static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) |
2226 | { |
2227 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
2228 | int queue; |
2229 | u8 mode; |
2230 | |
2231 | for (queue = 0; queue < rx_queues_count; queue++) { |
2232 | mode = priv->plat->rx_queues_cfg[queue].mode_to_use; |
2233 | stmmac_rx_queue_enable(priv, priv->hw, mode, queue); |
2234 | } |
2235 | } |
2236 | |
2237 | /** |
2238 | * stmmac_start_rx_dma - start RX DMA channel |
2239 | * @priv: driver private structure |
2240 | * @chan: RX channel index |
2241 | * Description: |
* This starts an RX DMA channel
2243 | */ |
2244 | static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) |
2245 | { |
2246 | netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n" , chan); |
2247 | stmmac_start_rx(priv, priv->ioaddr, chan); |
2248 | } |
2249 | |
2250 | /** |
2251 | * stmmac_start_tx_dma - start TX DMA channel |
2252 | * @priv: driver private structure |
2253 | * @chan: TX channel index |
2254 | * Description: |
2255 | * This starts a TX DMA channel |
2256 | */ |
2257 | static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) |
2258 | { |
2259 | netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n" , chan); |
2260 | stmmac_start_tx(priv, priv->ioaddr, chan); |
2261 | } |
2262 | |
2263 | /** |
2264 | * stmmac_stop_rx_dma - stop RX DMA channel |
2265 | * @priv: driver private structure |
2266 | * @chan: RX channel index |
2267 | * Description: |
* This stops an RX DMA channel
2269 | */ |
2270 | static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) |
2271 | { |
2272 | netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n" , chan); |
2273 | stmmac_stop_rx(priv, priv->ioaddr, chan); |
2274 | } |
2275 | |
2276 | /** |
2277 | * stmmac_stop_tx_dma - stop TX DMA channel |
2278 | * @priv: driver private structure |
2279 | * @chan: TX channel index |
2280 | * Description: |
2281 | * This stops a TX DMA channel |
2282 | */ |
2283 | static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) |
2284 | { |
2285 | netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n" , chan); |
2286 | stmmac_stop_tx(priv, priv->ioaddr, chan); |
2287 | } |
2288 | |
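/* Re-enable the RX and TX DMA interrupts of every DMA CSR channel */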
2289 | static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) |
2290 | { |
2291 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2292 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2293 | u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); |
2294 | u32 chan; |
2295 | |
2296 | for (chan = 0; chan < dma_csr_ch; chan++) { |
2297 | struct stmmac_channel *ch = &priv->channel[chan]; |
2298 | unsigned long flags; |
2299 | |
2300 | spin_lock_irqsave(&ch->lock, flags); |
2301 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); |
spin_unlock_irqrestore(&ch->lock, flags);
2303 | } |
2304 | } |
2305 | |
2306 | /** |
2307 | * stmmac_start_all_dma - start all RX and TX DMA channels |
2308 | * @priv: driver private structure |
2309 | * Description: |
2310 | * This starts all the RX and TX DMA channels |
2311 | */ |
2312 | static void stmmac_start_all_dma(struct stmmac_priv *priv) |
2313 | { |
2314 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2315 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2316 | u32 chan = 0; |
2317 | |
2318 | for (chan = 0; chan < rx_channels_count; chan++) |
2319 | stmmac_start_rx_dma(priv, chan); |
2320 | |
2321 | for (chan = 0; chan < tx_channels_count; chan++) |
2322 | stmmac_start_tx_dma(priv, chan); |
2323 | } |
2324 | |
2325 | /** |
2326 | * stmmac_stop_all_dma - stop all RX and TX DMA channels |
2327 | * @priv: driver private structure |
2328 | * Description: |
2329 | * This stops the RX and TX DMA channels |
2330 | */ |
2331 | static void stmmac_stop_all_dma(struct stmmac_priv *priv) |
2332 | { |
2333 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2334 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2335 | u32 chan = 0; |
2336 | |
2337 | for (chan = 0; chan < rx_channels_count; chan++) |
2338 | stmmac_stop_rx_dma(priv, chan); |
2339 | |
2340 | for (chan = 0; chan < tx_channels_count; chan++) |
2341 | stmmac_stop_tx_dma(priv, chan); |
2342 | } |
2343 | |
2344 | /** |
2345 | * stmmac_dma_operation_mode - HW DMA operation mode |
2346 | * @priv: driver private structure |
2347 | * Description: it is used for configuring the DMA operation mode register in |
2348 | * order to program the tx/rx DMA thresholds or Store-And-Forward mode. |
2349 | */ |
2350 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) |
2351 | { |
2352 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2353 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2354 | int rxfifosz = priv->plat->rx_fifo_size; |
2355 | int txfifosz = priv->plat->tx_fifo_size; |
2356 | u32 txmode = 0; |
2357 | u32 rxmode = 0; |
2358 | u32 chan = 0; |
2359 | u8 qmode = 0; |
2360 | |
2361 | if (rxfifosz == 0) |
2362 | rxfifosz = priv->dma_cap.rx_fifo_size; |
2363 | if (txfifosz == 0) |
2364 | txfifosz = priv->dma_cap.tx_fifo_size; |
2365 | |
2366 | /* Adjust for real per queue fifo size */ |
2367 | rxfifosz /= rx_channels_count; |
2368 | txfifosz /= tx_channels_count; |
2369 | |
2370 | if (priv->plat->force_thresh_dma_mode) { |
2371 | txmode = tc; |
2372 | rxmode = tc; |
2373 | } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { |
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
* 1) TX COE being actually supported;
* 2) there being no buggy Jumbo frame support
* that requires not inserting the csum in the TDES.
*/
2381 | txmode = SF_DMA_MODE; |
2382 | rxmode = SF_DMA_MODE; |
2383 | priv->xstats.threshold = SF_DMA_MODE; |
2384 | } else { |
2385 | txmode = tc; |
2386 | rxmode = SF_DMA_MODE; |
2387 | } |
2388 | |
2389 | /* configure all channels */ |
2390 | for (chan = 0; chan < rx_channels_count; chan++) { |
2391 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; |
2392 | u32 buf_size; |
2393 | |
2394 | qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; |
2395 | |
2396 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, |
2397 | rxfifosz, qmode); |
2398 | |
2399 | if (rx_q->xsk_pool) { |
buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2401 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
2402 | buf_size, |
2403 | chan); |
2404 | } else { |
2405 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
2406 | priv->dma_conf.dma_buf_sz, |
2407 | chan); |
2408 | } |
2409 | } |
2410 | |
2411 | for (chan = 0; chan < tx_channels_count; chan++) { |
2412 | qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; |
2413 | |
2414 | stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, |
2415 | txfifosz, qmode); |
2416 | } |
2417 | } |
2418 | |
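/* XSK TX metadata op: request a hardware TX timestamp for this descriptor
* and force an interrupt on completion so the timestamp can be collected.
*/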
2419 | static void stmmac_xsk_request_timestamp(void *_priv) |
2420 | { |
2421 | struct stmmac_metadata_request *meta_req = _priv; |
2422 | |
2423 | stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); |
2424 | *meta_req->set_ic = true; |
2425 | } |
2426 | |
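/* XSK TX metadata op: fetch the TX timestamp of a completed descriptor,
* either from the descriptor itself or from the MAC, corrected for the
* CDC sync error.
*/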
2427 | static u64 stmmac_xsk_fill_timestamp(void *_priv) |
2428 | { |
2429 | struct stmmac_xsk_tx_complete *tx_compl = _priv; |
2430 | struct stmmac_priv *priv = tx_compl->priv; |
2431 | struct dma_desc *desc = tx_compl->desc; |
2432 | bool found = false; |
2433 | u64 ns = 0; |
2434 | |
2435 | if (!priv->hwts_tx_en) |
2436 | return 0; |
2437 | |
2438 | /* check tx tstamp status */ |
2439 | if (stmmac_get_tx_timestamp_status(priv, desc)) { |
2440 | stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); |
2441 | found = true; |
2442 | } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { |
2443 | found = true; |
2444 | } |
2445 | |
2446 | if (found) { |
2447 | ns -= priv->plat->cdc_error_adj; |
2448 | return ns_to_ktime(ns); |
2449 | } |
2450 | |
2451 | return 0; |
2452 | } |
2453 | |
2454 | static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = { |
2455 | .tmo_request_timestamp = stmmac_xsk_request_timestamp, |
2456 | .tmo_fill_timestamp = stmmac_xsk_fill_timestamp, |
2457 | }; |
2458 | |
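/**
* stmmac_xdp_xmit_zc - transmit pending XSK TX descriptors (zero-copy)
* @priv: driver private structure
* @queue: TX queue index
* @budget: maximum number of descriptors to submit
* Description: peek descriptors from the XSK pool and post them to the TX
* ring, stopping early when the ring runs low since it is shared with the
* slow path. Return true when the budget was not exhausted and no XSK
* descriptors are left pending.
*/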
2459 | static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) |
2460 | { |
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2462 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
2463 | struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; |
2464 | struct xsk_buff_pool *pool = tx_q->xsk_pool; |
2465 | unsigned int entry = tx_q->cur_tx; |
2466 | struct dma_desc *tx_desc = NULL; |
2467 | struct xdp_desc xdp_desc; |
2468 | bool work_done = true; |
2469 | u32 tx_set_ic_bit = 0; |
2470 | |
2471 | /* Avoids TX time-out as we are sharing with slow path */ |
txq_trans_cond_update(nq);
2473 | |
2474 | budget = min(budget, stmmac_tx_avail(priv, queue)); |
2475 | |
2476 | while (budget-- > 0) { |
2477 | struct stmmac_metadata_request meta_req; |
2478 | struct xsk_tx_metadata *meta = NULL; |
2479 | dma_addr_t dma_addr; |
2480 | bool set_ic; |
2481 | |
2482 | /* We are sharing with slow path and stop XSK TX desc submission when |
2483 | * available TX ring is less than threshold. |
2484 | */ |
2485 | if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || |
!netif_carrier_ok(priv->dev)) {
2487 | work_done = false; |
2488 | break; |
2489 | } |
2490 | |
if (!xsk_tx_peek_desc(pool, &xdp_desc))
2492 | break; |
2493 | |
2494 | if (priv->plat->est && priv->plat->est->enable && |
2495 | priv->plat->est->max_sdu[queue] && |
2496 | xdp_desc.len > priv->plat->est->max_sdu[queue]) { |
2497 | priv->xstats.max_sdu_txq_drop[queue]++; |
2498 | continue; |
2499 | } |
2500 | |
2501 | if (likely(priv->extend_desc)) |
2502 | tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
2503 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
2504 | tx_desc = &tx_q->dma_entx[entry].basic; |
2505 | else |
2506 | tx_desc = tx_q->dma_tx + entry; |
2507 | |
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2511 | |
2512 | tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; |
2513 | |
/* To return an XDP buffer to the XSK pool, we simply call
* xsk_tx_completed(), so we don't need to fill up
* 'buf' and 'xdpf'.
*/
2518 | tx_q->tx_skbuff_dma[entry].buf = 0; |
2519 | tx_q->xdpf[entry] = NULL; |
2520 | |
2521 | tx_q->tx_skbuff_dma[entry].map_as_page = false; |
2522 | tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; |
2523 | tx_q->tx_skbuff_dma[entry].last_segment = true; |
2524 | tx_q->tx_skbuff_dma[entry].is_jumbo = false; |
2525 | |
2526 | stmmac_set_desc_addr(priv, tx_desc, dma_addr); |
2527 | |
2528 | tx_q->tx_count_frames++; |
2529 | |
2530 | if (!priv->tx_coal_frames[queue]) |
2531 | set_ic = false; |
2532 | else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) |
2533 | set_ic = true; |
2534 | else |
2535 | set_ic = false; |
2536 | |
2537 | meta_req.priv = priv; |
2538 | meta_req.tx_desc = tx_desc; |
2539 | meta_req.set_ic = &set_ic; |
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
2542 | if (set_ic) { |
2543 | tx_q->tx_count_frames = 0; |
2544 | stmmac_set_tx_ic(priv, tx_desc); |
2545 | tx_set_ic_bit++; |
2546 | } |
2547 | |
2548 | stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, |
2549 | true, priv->mode, true, true, |
2550 | xdp_desc.len); |
2551 | |
2552 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
2553 | |
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
2556 | |
2557 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); |
2558 | entry = tx_q->cur_tx; |
2559 | } |
u64_stats_update_begin(&txq_stats->napi_syncp);
u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
u64_stats_update_end(&txq_stats->napi_syncp);
2563 | |
2564 | if (tx_desc) { |
2565 | stmmac_flush_tx_descriptors(priv, queue); |
2566 | xsk_tx_release(pool); |
2567 | } |
2568 | |
/* Return true if both of the following conditions are met
* a) TX budget is still available
* b) work_done = true when the XSK TX desc peek is empty (no more
* pending XSK TX descriptors for transmission)
*/
2574 | return !!budget && work_done; |
2575 | } |
2576 | |
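/* Bump the TX threshold up by 64 (up to 256) when a threshold-related TX
* error is reported and the channel is not in Store-And-Forward mode.
*/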
2577 | static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) |
2578 | { |
2579 | if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { |
2580 | tc += 64; |
2581 | |
2582 | if (priv->plat->force_thresh_dma_mode) |
stmmac_set_dma_operation_mode(priv, tc, tc, chan);
else
stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
chan);
2587 | |
2588 | priv->xstats.threshold = tc; |
2589 | } |
2590 | } |
2591 | |
2592 | /** |
2593 | * stmmac_tx_clean - to manage the transmission completion |
2594 | * @priv: driver private structure |
2595 | * @budget: napi budget limiting this functions packet handling |
2596 | * @queue: TX queue index |
2597 | * @pending_packets: signal to arm the TX coal timer |
2598 | * Description: it reclaims the transmit resources after transmission completes. |
* If some packets still need to be handled, due to TX coalescing, set
2600 | * pending_packets to true to make NAPI arm the TX coal timer. |
2601 | */ |
2602 | static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, |
2603 | bool *pending_packets) |
2604 | { |
2605 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
2606 | struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; |
2607 | unsigned int bytes_compl = 0, pkts_compl = 0; |
2608 | unsigned int entry, xmits = 0, count = 0; |
2609 | u32 tx_packets = 0, tx_errors = 0; |
2610 | |
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2612 | |
2613 | tx_q->xsk_frames_done = 0; |
2614 | |
2615 | entry = tx_q->dirty_tx; |
2616 | |
/* Try to clean all completed TX frames in one shot */
2618 | while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { |
2619 | struct xdp_frame *xdpf; |
2620 | struct sk_buff *skb; |
2621 | struct dma_desc *p; |
2622 | int status; |
2623 | |
2624 | if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || |
2625 | tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { |
2626 | xdpf = tx_q->xdpf[entry]; |
2627 | skb = NULL; |
2628 | } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { |
2629 | xdpf = NULL; |
2630 | skb = tx_q->tx_skbuff[entry]; |
2631 | } else { |
2632 | xdpf = NULL; |
2633 | skb = NULL; |
2634 | } |
2635 | |
2636 | if (priv->extend_desc) |
2637 | p = (struct dma_desc *)(tx_q->dma_etx + entry); |
2638 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
2639 | p = &tx_q->dma_entx[entry].basic; |
2640 | else |
2641 | p = tx_q->dma_tx + entry; |
2642 | |
2643 | status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); |
2644 | /* Check if the descriptor is owned by the DMA */ |
2645 | if (unlikely(status & tx_dma_own)) |
2646 | break; |
2647 | |
2648 | count++; |
2649 | |
2650 | /* Make sure descriptor fields are read after reading |
2651 | * the own bit. |
2652 | */ |
2653 | dma_rmb(); |
2654 | |
2655 | /* Just consider the last segment and ...*/ |
2656 | if (likely(!(status & tx_not_ls))) { |
2657 | /* ... verify the status error condition */ |
2658 | if (unlikely(status & tx_err)) { |
2659 | tx_errors++; |
2660 | if (unlikely(status & tx_err_bump_tc)) |
stmmac_bump_dma_threshold(priv, queue);
2662 | } else { |
2663 | tx_packets++; |
2664 | } |
2665 | if (skb) { |
2666 | stmmac_get_tx_hwtstamp(priv, p, skb); |
2667 | } else if (tx_q->xsk_pool && |
xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2669 | struct stmmac_xsk_tx_complete tx_compl = { |
2670 | .priv = priv, |
2671 | .desc = p, |
2672 | }; |
2673 | |
xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
&stmmac_xsk_tx_metadata_ops,
&tx_compl);
2677 | } |
2678 | } |
2679 | |
2680 | if (likely(tx_q->tx_skbuff_dma[entry].buf && |
2681 | tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { |
2682 | if (tx_q->tx_skbuff_dma[entry].map_as_page) |
2683 | dma_unmap_page(priv->device, |
2684 | tx_q->tx_skbuff_dma[entry].buf, |
2685 | tx_q->tx_skbuff_dma[entry].len, |
2686 | DMA_TO_DEVICE); |
2687 | else |
2688 | dma_unmap_single(priv->device, |
2689 | tx_q->tx_skbuff_dma[entry].buf, |
2690 | tx_q->tx_skbuff_dma[entry].len, |
2691 | DMA_TO_DEVICE); |
2692 | tx_q->tx_skbuff_dma[entry].buf = 0; |
2693 | tx_q->tx_skbuff_dma[entry].len = 0; |
2694 | tx_q->tx_skbuff_dma[entry].map_as_page = false; |
2695 | } |
2696 | |
2697 | stmmac_clean_desc3(priv, tx_q, p); |
2698 | |
2699 | tx_q->tx_skbuff_dma[entry].last_segment = false; |
2700 | tx_q->tx_skbuff_dma[entry].is_jumbo = false; |
2701 | |
2702 | if (xdpf && |
2703 | tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { |
2704 | xdp_return_frame_rx_napi(xdpf); |
2705 | tx_q->xdpf[entry] = NULL; |
2706 | } |
2707 | |
2708 | if (xdpf && |
2709 | tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { |
2710 | xdp_return_frame(xdpf); |
2711 | tx_q->xdpf[entry] = NULL; |
2712 | } |
2713 | |
2714 | if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) |
2715 | tx_q->xsk_frames_done++; |
2716 | |
2717 | if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { |
2718 | if (likely(skb)) { |
2719 | pkts_compl++; |
2720 | bytes_compl += skb->len; |
2721 | dev_consume_skb_any(skb); |
2722 | tx_q->tx_skbuff[entry] = NULL; |
2723 | } |
2724 | } |
2725 | |
2726 | stmmac_release_tx_desc(priv, p, priv->mode); |
2727 | |
2728 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); |
2729 | } |
2730 | tx_q->dirty_tx = entry; |
2731 | |
netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
pkts_compl, bytes_compl);
2734 | |
2735 | if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, |
2736 | queue))) && |
2737 | stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { |
2738 | |
2739 | netif_dbg(priv, tx_done, priv->dev, |
2740 | "%s: restart transmit\n" , __func__); |
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2742 | } |
2743 | |
2744 | if (tx_q->xsk_pool) { |
2745 | bool work_done; |
2746 | |
2747 | if (tx_q->xsk_frames_done) |
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2749 | |
if (xsk_uses_need_wakeup(tx_q->xsk_pool))
xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2752 | |
2753 | /* For XSK TX, we try to send as many as possible. |
2754 | * If XSK work done (XSK TX desc empty and budget still |
2755 | * available), return "budget - 1" to reenable TX IRQ. |
2756 | * Else, return "budget" to make NAPI continue polling. |
2757 | */ |
2758 | work_done = stmmac_xdp_xmit_zc(priv, queue, |
2759 | STMMAC_XSK_TX_BUDGET_MAX); |
2760 | if (work_done) |
2761 | xmits = budget - 1; |
2762 | else |
2763 | xmits = budget; |
2764 | } |
2765 | |
2766 | if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && |
2767 | priv->eee_sw_timer_en) { |
2768 | if (stmmac_enable_eee_mode(priv)) |
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2770 | } |
2771 | |
2772 | /* We still have pending packets, let's call for a new scheduling */ |
2773 | if (tx_q->dirty_tx != tx_q->cur_tx) |
2774 | *pending_packets = true; |
2775 | |
u64_stats_update_begin(&txq_stats->napi_syncp);
u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
u64_stats_inc(&txq_stats->napi.tx_clean);
u64_stats_update_end(&txq_stats->napi_syncp);
2781 | |
2782 | priv->xstats.tx_errors += tx_errors; |
2783 | |
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2785 | |
2786 | /* Combine decisions from TX clean and XSK TX */ |
2787 | return max(count, xmits); |
2788 | } |
2789 | |
2790 | /** |
2791 | * stmmac_tx_err - to manage the tx error |
2792 | * @priv: driver private structure |
2793 | * @chan: channel index |
2794 | * Description: it cleans the descriptors and restarts the transmission |
2795 | * in case of transmission errors. |
2796 | */ |
2797 | static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
2798 | { |
2799 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; |
2800 | |
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2802 | |
2803 | stmmac_stop_tx_dma(priv, chan); |
dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
stmmac_reset_tx_queue(priv, chan);
2807 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
2808 | tx_q->dma_tx_phy, chan); |
2809 | stmmac_start_tx_dma(priv, chan); |
2810 | |
2811 | priv->xstats.tx_errors++; |
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2813 | } |
2814 | |
2815 | /** |
2816 | * stmmac_set_dma_operation_mode - Set DMA operation mode by channel |
2817 | * @priv: driver private structure |
2818 | * @txmode: TX operating mode |
2819 | * @rxmode: RX operating mode |
2820 | * @chan: channel index |
* Description: it is used for configuring the DMA operation mode at
* runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2823 | * mode. |
2824 | */ |
2825 | static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, |
2826 | u32 rxmode, u32 chan) |
2827 | { |
2828 | u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; |
2829 | u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; |
2830 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2831 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2832 | int rxfifosz = priv->plat->rx_fifo_size; |
2833 | int txfifosz = priv->plat->tx_fifo_size; |
2834 | |
2835 | if (rxfifosz == 0) |
2836 | rxfifosz = priv->dma_cap.rx_fifo_size; |
2837 | if (txfifosz == 0) |
2838 | txfifosz = priv->dma_cap.tx_fifo_size; |
2839 | |
2840 | /* Adjust for real per queue fifo size */ |
2841 | rxfifosz /= rx_channels_count; |
2842 | txfifosz /= tx_channels_count; |
2843 | |
2844 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); |
2845 | stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); |
2846 | } |
2847 | |
2848 | static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) |
2849 | { |
2850 | int ret; |
2851 | |
2852 | ret = stmmac_safety_feat_irq_status(priv, priv->dev, |
2853 | priv->ioaddr, priv->dma_cap.asp, &priv->sstats); |
2854 | if (ret && (ret != -EINVAL)) { |
2855 | stmmac_global_err(priv); |
2856 | return true; |
2857 | } |
2858 | |
2859 | return false; |
2860 | } |
2861 | |
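/* Read the DMA interrupt status of the channel and, for each direction
* flagged, disable that DMA interrupt and schedule the matching NAPI
* instance (rx/tx, or rxtx when the queue uses an XSK pool).
*/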
2862 | static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) |
2863 | { |
2864 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, |
2865 | &priv->xstats, chan, dir); |
2866 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; |
2867 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; |
2868 | struct stmmac_channel *ch = &priv->channel[chan]; |
2869 | struct napi_struct *rx_napi; |
2870 | struct napi_struct *tx_napi; |
2871 | unsigned long flags; |
2872 | |
2873 | rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; |
2874 | tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; |
2875 | |
2876 | if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { |
if (napi_schedule_prep(rx_napi)) {
2878 | spin_lock_irqsave(&ch->lock, flags); |
2879 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); |
spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(rx_napi);
2882 | } |
2883 | } |
2884 | |
2885 | if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { |
if (napi_schedule_prep(tx_napi)) {
2887 | spin_lock_irqsave(&ch->lock, flags); |
2888 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); |
spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(tx_napi);
2891 | } |
2892 | } |
2893 | |
2894 | return status; |
2895 | } |
2896 | |
2897 | /** |
2898 | * stmmac_dma_interrupt - DMA ISR |
2899 | * @priv: driver private structure |
2900 | * Description: this is the DMA ISR. It is called by the main ISR. |
* It calls the dwmac DMA routine and schedules the poll method when
* there is work to be done.
2903 | */ |
2904 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) |
2905 | { |
2906 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
2907 | u32 rx_channel_count = priv->plat->rx_queues_to_use; |
2908 | u32 channels_to_check = tx_channel_count > rx_channel_count ? |
2909 | tx_channel_count : rx_channel_count; |
2910 | u32 chan; |
2911 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; |
2912 | |
2913 | /* Make sure we never check beyond our status buffer. */ |
2914 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) |
2915 | channels_to_check = ARRAY_SIZE(status); |
2916 | |
2917 | for (chan = 0; chan < channels_to_check; chan++) |
status[chan] = stmmac_napi_check(priv, chan,
DMA_DIR_RXTX);
2920 | |
2921 | for (chan = 0; chan < tx_channel_count; chan++) { |
2922 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { |
2923 | /* Try to bump up the dma threshold on this failure */ |
2924 | stmmac_bump_dma_threshold(priv, chan); |
2925 | } else if (unlikely(status[chan] == tx_hard_error)) { |
2926 | stmmac_tx_err(priv, chan); |
2927 | } |
2928 | } |
2929 | } |
2930 | |
2931 | /** |
* stmmac_mmc_setup - setup the MAC Management Counters (MMC)
* @priv: driver private structure
* Description: this masks the MMC irq; the counters are managed in SW.
2935 | */ |
2936 | static void stmmac_mmc_setup(struct stmmac_priv *priv) |
2937 | { |
2938 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | |
2939 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
2940 | |
2941 | stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); |
2942 | |
if (priv->dma_cap.rmon) {
stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else {
netdev_info(priv->dev, "No MAC Management Counters available\n");
}
2948 | } |
2949 | |
2950 | /** |
2951 | * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. |
2952 | * @priv: driver private structure |
2953 | * Description: |
2954 | * new GMAC chip generations have a new register to indicate the |
* presence of the optional features/functions.
* This can also be used to override the value passed through the
* platform, and is necessary for old MAC10/100 and GMAC chips.
2958 | */ |
2959 | static int stmmac_get_hw_features(struct stmmac_priv *priv) |
2960 | { |
2961 | return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; |
2962 | } |
2963 | |
2964 | /** |
2965 | * stmmac_check_ether_addr - check if the MAC addr is valid |
2966 | * @priv: driver private structure |
2967 | * Description: |
* it verifies that the MAC address is valid; in case of failure it
* generates a random MAC address
2970 | */ |
2971 | static void stmmac_check_ether_addr(struct stmmac_priv *priv) |
2972 | { |
2973 | u8 addr[ETH_ALEN]; |
2974 | |
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
stmmac_get_umac_addr(priv, priv->hw, addr, 0);
if (is_valid_ether_addr(addr))
eth_hw_addr_set(priv->dev, addr);
else
eth_hw_addr_random(priv->dev);
dev_info(priv->device, "device MAC address %pM\n",
priv->dev->dev_addr);
2983 | } |
2984 | } |
2985 | |
2986 | /** |
2987 | * stmmac_init_dma_engine - DMA init. |
2988 | * @priv: driver private structure |
2989 | * Description: |
2990 | * It inits the DMA invoking the specific MAC/GMAC callback. |
2991 | * Some DMA parameters can be passed from the platform; |
* if they are not passed, a default is kept for the MAC or GMAC.
2993 | */ |
2994 | static int stmmac_init_dma_engine(struct stmmac_priv *priv) |
2995 | { |
2996 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2997 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
2998 | u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); |
2999 | struct stmmac_rx_queue *rx_q; |
3000 | struct stmmac_tx_queue *tx_q; |
3001 | u32 chan = 0; |
3002 | int atds = 0; |
3003 | int ret = 0; |
3004 | |
3005 | if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { |
3006 | dev_err(priv->device, "Invalid DMA configuration\n" ); |
3007 | return -EINVAL; |
3008 | } |
3009 | |
3010 | if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) |
3011 | atds = 1; |
3012 | |
ret = stmmac_reset(priv, priv->ioaddr);
3014 | if (ret) { |
3015 | dev_err(priv->device, "Failed to reset the dma\n" ); |
3016 | return ret; |
3017 | } |
3018 | |
3019 | /* DMA Configuration */ |
3020 | stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); |
3021 | |
3022 | if (priv->plat->axi) |
3023 | stmmac_axi(priv, priv->ioaddr, priv->plat->axi); |
3024 | |
3025 | /* DMA CSR Channel configuration */ |
3026 | for (chan = 0; chan < dma_csr_ch; chan++) { |
3027 | stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); |
3028 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); |
3029 | } |
3030 | |
3031 | /* DMA RX Channel Configuration */ |
3032 | for (chan = 0; chan < rx_channels_count; chan++) { |
3033 | rx_q = &priv->dma_conf.rx_queue[chan]; |
3034 | |
3035 | stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
3036 | rx_q->dma_rx_phy, chan); |
3037 | |
3038 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
3039 | (rx_q->buf_alloc_num * |
3040 | sizeof(struct dma_desc)); |
3041 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, |
3042 | rx_q->rx_tail_addr, chan); |
3043 | } |
3044 | |
3045 | /* DMA TX Channel Configuration */ |
3046 | for (chan = 0; chan < tx_channels_count; chan++) { |
3047 | tx_q = &priv->dma_conf.tx_queue[chan]; |
3048 | |
3049 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
3050 | tx_q->dma_tx_phy, chan); |
3051 | |
3052 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
3053 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
3054 | tx_q->tx_tail_addr, chan); |
3055 | } |
3056 | |
3057 | return ret; |
3058 | } |
3059 | |
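/* (Re)arm the TX coalescing timer of the queue, unless its NAPI instance
* is already scheduled.
*/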
3060 | static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) |
3061 | { |
3062 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
3063 | u32 tx_coal_timer = priv->tx_coal_timer[queue]; |
3064 | struct stmmac_channel *ch; |
3065 | struct napi_struct *napi; |
3066 | |
3067 | if (!tx_coal_timer) |
3068 | return; |
3069 | |
3070 | ch = &priv->channel[tx_q->queue_index]; |
3071 | napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; |
3072 | |
/* Arm the timer only if napi is not already scheduled.
* Try to cancel any timer if napi is scheduled; the timer will be
* armed again on the next scheduled napi.
*/
if (unlikely(!napi_is_scheduled(napi)))
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
else
hrtimer_try_to_cancel(&tx_q->txtimer);
3083 | } |
3084 | |
3085 | /** |
3086 | * stmmac_tx_timer - mitigation sw timer for tx. |
3087 | * @t: data pointer |
3088 | * Description: |
3089 | * This is the timer handler to directly invoke the stmmac_tx_clean. |
3090 | */ |
3091 | static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) |
3092 | { |
3093 | struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); |
3094 | struct stmmac_priv *priv = tx_q->priv_data; |
3095 | struct stmmac_channel *ch; |
3096 | struct napi_struct *napi; |
3097 | |
3098 | ch = &priv->channel[tx_q->queue_index]; |
3099 | napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; |
3100 | |
3101 | if (likely(napi_schedule_prep(napi))) { |
3102 | unsigned long flags; |
3103 | |
3104 | spin_lock_irqsave(&ch->lock, flags); |
3105 | stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); |
spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(napi);
3108 | } |
3109 | |
3110 | return HRTIMER_NORESTART; |
3111 | } |
3112 | |
3113 | /** |
3114 | * stmmac_init_coalesce - init mitigation options. |
3115 | * @priv: driver private structure |
3116 | * Description: |
3117 | * This inits the coalesce parameters: i.e. timer rate, |
3118 | * timer handler and default threshold used for enabling the |
3119 | * interrupt on completion bit. |
3120 | */ |
3121 | static void stmmac_init_coalesce(struct stmmac_priv *priv) |
3122 | { |
3123 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
3124 | u32 rx_channel_count = priv->plat->rx_queues_to_use; |
3125 | u32 chan; |
3126 | |
3127 | for (chan = 0; chan < tx_channel_count; chan++) { |
3128 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; |
3129 | |
3130 | priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; |
3131 | priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; |
3132 | |
hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3134 | tx_q->txtimer.function = stmmac_tx_timer; |
3135 | } |
3136 | |
3137 | for (chan = 0; chan < rx_channel_count; chan++) |
3138 | priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; |
3139 | } |
3140 | |
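/* Program the RX and TX descriptor ring lengths into the hardware */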
3141 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
3142 | { |
3143 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
3144 | u32 tx_channels_count = priv->plat->tx_queues_to_use; |
3145 | u32 chan; |
3146 | |
3147 | /* set TX ring length */ |
3148 | for (chan = 0; chan < tx_channels_count; chan++) |
3149 | stmmac_set_tx_ring_len(priv, priv->ioaddr, |
3150 | (priv->dma_conf.dma_tx_size - 1), chan); |
3151 | |
3152 | /* set RX ring length */ |
3153 | for (chan = 0; chan < rx_channels_count; chan++) |
3154 | stmmac_set_rx_ring_len(priv, priv->ioaddr, |
3155 | (priv->dma_conf.dma_rx_size - 1), chan); |
3156 | } |
3157 | |
3158 | /** |
3159 | * stmmac_set_tx_queue_weight - Set TX queue weight |
3160 | * @priv: driver private structure |
3161 | * Description: It is used for setting TX queues weight |
3162 | */ |
3163 | static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) |
3164 | { |
3165 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
3166 | u32 weight; |
3167 | u32 queue; |
3168 | |
3169 | for (queue = 0; queue < tx_queues_count; queue++) { |
3170 | weight = priv->plat->tx_queues_cfg[queue].weight; |
3171 | stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); |
3172 | } |
3173 | } |
3174 | |
3175 | /** |
3176 | * stmmac_configure_cbs - Configure CBS in TX queue |
3177 | * @priv: driver private structure |
3178 | * Description: It is used for configuring CBS in AVB TX queues |
3179 | */ |
3180 | static void stmmac_configure_cbs(struct stmmac_priv *priv) |
3181 | { |
3182 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
3183 | u32 mode_to_use; |
3184 | u32 queue; |
3185 | |
3186 | /* queue 0 is reserved for legacy traffic */ |
3187 | for (queue = 1; queue < tx_queues_count; queue++) { |
3188 | mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; |
3189 | if (mode_to_use == MTL_QUEUE_DCB) |
3190 | continue; |
3191 | |
3192 | stmmac_config_cbs(priv, priv->hw, |
3193 | priv->plat->tx_queues_cfg[queue].send_slope, |
3194 | priv->plat->tx_queues_cfg[queue].idle_slope, |
3195 | priv->plat->tx_queues_cfg[queue].high_credit, |
3196 | priv->plat->tx_queues_cfg[queue].low_credit, |
3197 | queue); |
3198 | } |
3199 | } |
3200 | |
3201 | /** |
3202 | * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel |
3203 | * @priv: driver private structure |
3204 | * Description: It is used for mapping RX queues to RX dma channels |
3205 | */ |
3206 | static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) |
3207 | { |
3208 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
3209 | u32 queue; |
3210 | u32 chan; |
3211 | |
3212 | for (queue = 0; queue < rx_queues_count; queue++) { |
3213 | chan = priv->plat->rx_queues_cfg[queue].chan; |
3214 | stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); |
3215 | } |
3216 | } |
3217 | |
3218 | /** |
3219 | * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority |
3220 | * @priv: driver private structure |
3221 | * Description: It is used for configuring the RX Queue Priority |
3222 | */ |
3223 | static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) |
3224 | { |
3225 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
3226 | u32 queue; |
3227 | u32 prio; |
3228 | |
3229 | for (queue = 0; queue < rx_queues_count; queue++) { |
3230 | if (!priv->plat->rx_queues_cfg[queue].use_prio) |
3231 | continue; |
3232 | |
3233 | prio = priv->plat->rx_queues_cfg[queue].prio; |
3234 | stmmac_rx_queue_prio(priv, priv->hw, prio, queue); |
3235 | } |
3236 | } |
3237 | |
3238 | /** |
3239 | * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority |
3240 | * @priv: driver private structure |
3241 | * Description: It is used for configuring the TX Queue Priority |
3242 | */ |
3243 | static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) |
3244 | { |
3245 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
3246 | u32 queue; |
3247 | u32 prio; |
3248 | |
3249 | for (queue = 0; queue < tx_queues_count; queue++) { |
3250 | if (!priv->plat->tx_queues_cfg[queue].use_prio) |
3251 | continue; |
3252 | |
3253 | prio = priv->plat->tx_queues_cfg[queue].prio; |
3254 | stmmac_tx_queue_prio(priv, priv->hw, prio, queue); |
3255 | } |
3256 | } |
3257 | |
3258 | /** |
3259 | * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing |
3260 | * @priv: driver private structure |
3261 | * Description: It is used for configuring the RX queue routing |
3262 | */ |
3263 | static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) |
3264 | { |
3265 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
3266 | u32 queue; |
3267 | u8 packet; |
3268 | |
3269 | for (queue = 0; queue < rx_queues_count; queue++) { |
3270 | /* no specific packet type routing specified for the queue */ |
3271 | if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) |
3272 | continue; |
3273 | |
3274 | packet = priv->plat->rx_queues_cfg[queue].pkt_route; |
3275 | stmmac_rx_queue_routing(priv, priv->hw, packet, queue); |
3276 | } |
3277 | } |
3278 | |
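/**
 * stmmac_mac_config_rss - Configure RSS
 * @priv: driver private structure
 * Description: It is used for configuring RSS in RX queues
 */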
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3280 | { |
3281 | if (!priv->dma_cap.rssen || !priv->plat->rss_en) { |
3282 | priv->rss.enable = false; |
3283 | return; |
3284 | } |
3285 | |
3286 | if (priv->dev->features & NETIF_F_RXHASH) |
3287 | priv->rss.enable = true; |
3288 | else |
3289 | priv->rss.enable = false; |
3290 | |
3291 | stmmac_rss_configure(priv, priv->hw, &priv->rss, |
3292 | priv->plat->rx_queues_to_use); |
3293 | } |
3294 | |
3295 | /** |
3296 | * stmmac_mtl_configuration - Configure MTL |
3297 | * @priv: driver private structure |
 * Description: It is used for configuring MTL
3299 | */ |
3300 | static void stmmac_mtl_configuration(struct stmmac_priv *priv) |
3301 | { |
3302 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
3303 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
3304 | |
3305 | if (tx_queues_count > 1) |
3306 | stmmac_set_tx_queue_weight(priv); |
3307 | |
3308 | /* Configure MTL RX algorithms */ |
3309 | if (rx_queues_count > 1) |
3310 | stmmac_prog_mtl_rx_algorithms(priv, priv->hw, |
3311 | priv->plat->rx_sched_algorithm); |
3312 | |
3313 | /* Configure MTL TX algorithms */ |
3314 | if (tx_queues_count > 1) |
3315 | stmmac_prog_mtl_tx_algorithms(priv, priv->hw, |
3316 | priv->plat->tx_sched_algorithm); |
3317 | |
3318 | /* Configure CBS in AVB TX queues */ |
3319 | if (tx_queues_count > 1) |
3320 | stmmac_configure_cbs(priv); |
3321 | |
3322 | /* Map RX MTL to DMA channels */ |
3323 | stmmac_rx_queue_dma_chan_map(priv); |
3324 | |
3325 | /* Enable MAC RX Queues */ |
3326 | stmmac_mac_enable_rx_queues(priv); |
3327 | |
3328 | /* Set RX priorities */ |
3329 | if (rx_queues_count > 1) |
3330 | stmmac_mac_config_rx_queues_prio(priv); |
3331 | |
3332 | /* Set TX priorities */ |
3333 | if (tx_queues_count > 1) |
3334 | stmmac_mac_config_tx_queues_prio(priv); |
3335 | |
3336 | /* Set RX routing */ |
3337 | if (rx_queues_count > 1) |
3338 | stmmac_mac_config_rx_queues_routing(priv); |
3339 | |
3340 | /* Receive Side Scaling */ |
3341 | if (rx_queues_count > 1) |
3342 | stmmac_mac_config_rss(priv); |
3343 | } |
3344 | |
3345 | static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) |
3346 | { |
3347 | if (priv->dma_cap.asp) { |
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
					  priv->plat->safety_feat_cfg);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
3353 | } |
3354 | } |
3355 | |
3356 | static int stmmac_fpe_start_wq(struct stmmac_priv *priv) |
3357 | { |
3358 | char *name; |
3359 | |
	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);

	name = priv->wq_name;
	sprintf(name, "%s-fpe", priv->dev->name);

	priv->fpe_wq = create_singlethread_workqueue(name);
	if (!priv->fpe_wq) {
		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);

		return -ENOMEM;
	}
	netdev_info(priv->dev, "FPE workqueue start");
3373 | |
3374 | return 0; |
3375 | } |
3376 | |
3377 | /** |
3378 | * stmmac_hw_setup - setup mac in a usable state. |
3379 | * @dev : pointer to the device structure. |
3380 | * @ptp_register: register PTP if set |
3381 | * Description: |
3382 | * this is the main function to setup the HW in a usable state because the |
3383 | * dma engine is reset, the core registers are configured (e.g. AXI, |
3384 | * Checksum features, timers). The DMA is ready to start receiving and |
3385 | * transmitting. |
3386 | * Return value: |
3387 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
3388 | * file on failure. |
3389 | */ |
3390 | static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) |
3391 | { |
3392 | struct stmmac_priv *priv = netdev_priv(dev); |
3393 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
3394 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
3395 | bool sph_en; |
3396 | u32 chan; |
3397 | int ret; |
3398 | |
3399 | /* DMA initialization and SW reset */ |
3400 | ret = stmmac_init_dma_engine(priv); |
3401 | if (ret < 0) { |
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3403 | __func__); |
3404 | return ret; |
3405 | } |
3406 | |
3407 | /* Copy the MAC addr into the HW */ |
3408 | stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); |
3409 | |
3410 | /* PS and related bits will be programmed according to the speed */ |
3411 | if (priv->hw->pcs) { |
3412 | int speed = priv->plat->mac_port_sel_speed; |
3413 | |
3414 | if ((speed == SPEED_10) || (speed == SPEED_100) || |
3415 | (speed == SPEED_1000)) { |
3416 | priv->hw->ps = speed; |
3417 | } else { |
			dev_warn(priv->device, "invalid port speed\n");
3419 | priv->hw->ps = 0; |
3420 | } |
3421 | } |
3422 | |
3423 | /* Initialize the MAC Core */ |
3424 | stmmac_core_init(priv, priv->hw, dev); |
3425 | |
3426 | /* Initialize MTL*/ |
3427 | stmmac_mtl_configuration(priv); |
3428 | |
3429 | /* Initialize Safety Features */ |
3430 | stmmac_safety_feat_configuration(priv); |
3431 | |
3432 | ret = stmmac_rx_ipc(priv, priv->hw); |
3433 | if (!ret) { |
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3435 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; |
3436 | priv->hw->rx_csum = 0; |
3437 | } |
3438 | |
3439 | /* Enable the MAC Rx/Tx */ |
3440 | stmmac_mac_set(priv, priv->ioaddr, true); |
3441 | |
3442 | /* Set the HW DMA mode and the COE */ |
3443 | stmmac_dma_operation_mode(priv); |
3444 | |
3445 | stmmac_mmc_setup(priv); |
3446 | |
3447 | if (ptp_register) { |
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev,
				    "failed to enable PTP reference clock: %pe\n",
				    ERR_PTR(ret));
3453 | } |
3454 | |
3455 | ret = stmmac_init_ptp(priv); |
3456 | if (ret == -EOPNOTSUPP) |
		netdev_info(priv->dev, "PTP not supported by HW\n");
	else if (ret)
		netdev_warn(priv->dev, "PTP init failed\n");
3460 | else if (ptp_register) |
3461 | stmmac_ptp_register(priv); |
3462 | |
3463 | priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; |
3464 | |
3465 | /* Convert the timer from msec to usec */ |
3466 | if (!priv->tx_lpi_timer) |
3467 | priv->tx_lpi_timer = eee_timer * 1000; |
3468 | |
3469 | if (priv->use_riwt) { |
3470 | u32 queue; |
3471 | |
3472 | for (queue = 0; queue < rx_cnt; queue++) { |
3473 | if (!priv->rx_riwt[queue]) |
3474 | priv->rx_riwt[queue] = DEF_DMA_RIWT; |
3475 | |
3476 | stmmac_rx_watchdog(priv, priv->ioaddr, |
3477 | priv->rx_riwt[queue], queue); |
3478 | } |
3479 | } |
3480 | |
3481 | if (priv->hw->pcs) |
3482 | stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); |
3483 | |
3484 | /* set TX and RX rings length */ |
3485 | stmmac_set_rings_length(priv); |
3486 | |
3487 | /* Enable TSO */ |
3488 | if (priv->tso) { |
3489 | for (chan = 0; chan < tx_cnt; chan++) { |
3490 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; |
3491 | |
3492 | /* TSO and TBS cannot co-exist */ |
3493 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
3494 | continue; |
3495 | |
3496 | stmmac_enable_tso(priv, priv->ioaddr, 1, chan); |
3497 | } |
3498 | } |
3499 | |
3500 | /* Enable Split Header */ |
3501 | sph_en = (priv->hw->rx_csum > 0) && priv->sph; |
3502 | for (chan = 0; chan < rx_cnt; chan++) |
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	/* VLAN Tag Insertion */
3507 | if (priv->dma_cap.vlins) |
3508 | stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); |
3509 | |
3510 | /* TBS */ |
3511 | for (chan = 0; chan < tx_cnt; chan++) { |
3512 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; |
3513 | int enable = tx_q->tbs & STMMAC_TBS_AVAIL; |
3514 | |
3515 | stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); |
3516 | } |
3517 | |
3518 | /* Configure real RX and TX queues */ |
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3521 | |
3522 | /* Start the ball rolling... */ |
3523 | stmmac_start_all_dma(priv); |
3524 | |
3525 | stmmac_set_hw_vlan_mode(priv, priv->hw); |
3526 | |
3527 | if (priv->dma_cap.fpesel) { |
3528 | stmmac_fpe_start_wq(priv); |
3529 | |
3530 | if (priv->plat->fpe_cfg->enable) |
			stmmac_fpe_handshake(priv, true);
3532 | } |
3533 | |
3534 | return 0; |
3535 | } |
3536 | |
3537 | static void stmmac_hw_teardown(struct net_device *dev) |
3538 | { |
3539 | struct stmmac_priv *priv = netdev_priv(dev); |
3540 | |
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3542 | } |
3543 | |
3544 | static void stmmac_free_irq(struct net_device *dev, |
3545 | enum request_irq_err irq_err, int irq_idx) |
3546 | { |
3547 | struct stmmac_priv *priv = netdev_priv(dev); |
3548 | int j; |
3549 | |
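	/* Each case label below names the request that failed; falling
	 * through releases every IRQ that was successfully requested
	 * before it, unwinding the request sequence in reverse order.
	 */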
3550 | switch (irq_err) { |
3551 | case REQ_IRQ_ERR_ALL: |
3552 | irq_idx = priv->plat->tx_queues_to_use; |
3553 | fallthrough; |
3554 | case REQ_IRQ_ERR_TX: |
3555 | for (j = irq_idx - 1; j >= 0; j--) { |
3556 | if (priv->tx_irq[j] > 0) { |
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3558 | free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); |
3559 | } |
3560 | } |
3561 | irq_idx = priv->plat->rx_queues_to_use; |
3562 | fallthrough; |
3563 | case REQ_IRQ_ERR_RX: |
3564 | for (j = irq_idx - 1; j >= 0; j--) { |
3565 | if (priv->rx_irq[j] > 0) { |
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3567 | free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); |
3568 | } |
3569 | } |
3570 | |
3571 | if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) |
3572 | free_irq(priv->sfty_ue_irq, dev); |
3573 | fallthrough; |
3574 | case REQ_IRQ_ERR_SFTY_UE: |
3575 | if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) |
3576 | free_irq(priv->sfty_ce_irq, dev); |
3577 | fallthrough; |
3578 | case REQ_IRQ_ERR_SFTY_CE: |
3579 | if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) |
3580 | free_irq(priv->lpi_irq, dev); |
3581 | fallthrough; |
3582 | case REQ_IRQ_ERR_LPI: |
3583 | if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) |
3584 | free_irq(priv->wol_irq, dev); |
3585 | fallthrough; |
3586 | case REQ_IRQ_ERR_SFTY: |
3587 | if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) |
3588 | free_irq(priv->sfty_irq, dev); |
3589 | fallthrough; |
3590 | case REQ_IRQ_ERR_WOL: |
3591 | free_irq(dev->irq, dev); |
3592 | fallthrough; |
3593 | case REQ_IRQ_ERR_MAC: |
3594 | case REQ_IRQ_ERR_NO: |
3595 | /* If MAC IRQ request error, no more IRQ to free */ |
3596 | break; |
3597 | } |
3598 | } |
3599 | |
3600 | static int stmmac_request_irq_multi_msi(struct net_device *dev) |
3601 | { |
3602 | struct stmmac_priv *priv = netdev_priv(dev); |
3603 | enum request_irq_err irq_err; |
3604 | cpumask_t cpu_mask; |
3605 | int irq_idx = 0; |
3606 | char *int_name; |
3607 | int ret; |
3608 | int i; |
3609 | |
3610 | /* For common interrupt */ |
3611 | int_name = priv->int_name_mac; |
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
3619 | irq_err = REQ_IRQ_ERR_MAC; |
3620 | goto irq_error; |
3621 | } |
3622 | |
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = priv->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
3637 | irq_err = REQ_IRQ_ERR_WOL; |
3638 | goto irq_error; |
3639 | } |
3640 | } |
3641 | |
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		int_name = priv->int_name_lpi;
		sprintf(int_name, "%s:%s", dev->name, "lpi");
		ret = request_irq(priv->lpi_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc lpi MSI %d (error: %d)\n",
				   __func__, priv->lpi_irq, ret);
3655 | irq_err = REQ_IRQ_ERR_LPI; |
3656 | goto irq_error; |
3657 | } |
3658 | } |
3659 | |
	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case a separate line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		int_name = priv->int_name_sfty;
		sprintf(int_name, "%s:%s", dev->name, "safety");
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty MSI %d (error: %d)\n",
				   __func__, priv->sfty_irq, ret);
3672 | irq_err = REQ_IRQ_ERR_SFTY; |
3673 | goto irq_error; |
3674 | } |
3675 | } |
3676 | |
	/* Request the Safety Feature Correctible Error line in
	 * case a separate line is used
	 */
	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
		int_name = priv->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(priv->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, priv->sfty_ce_irq, ret);
3690 | irq_err = REQ_IRQ_ERR_SFTY_CE; |
3691 | goto irq_error; |
3692 | } |
3693 | } |
3694 | |
	/* Request the Safety Feature Uncorrectible Error line in
	 * case a separate line is used
	 */
	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
		int_name = priv->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(priv->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, priv->sfty_ue_irq, ret);
3708 | irq_err = REQ_IRQ_ERR_SFTY_UE; |
3709 | goto irq_error; |
3710 | } |
3711 | } |
3712 | |
3713 | /* Request Rx MSI irq */ |
3714 | for (i = 0; i < priv->plat->rx_queues_to_use; i++) { |
3715 | if (i >= MTL_MAX_RX_QUEUES) |
3716 | break; |
3717 | if (priv->rx_irq[i] == 0) |
3718 | continue; |
3719 | |
3720 | int_name = priv->int_name_rx_irq[i]; |
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(priv->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->dma_conf.rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d MSI %d (error: %d)\n",
				   __func__, i, priv->rx_irq[i], ret);
3729 | irq_err = REQ_IRQ_ERR_RX; |
3730 | irq_idx = i; |
3731 | goto irq_error; |
3732 | } |
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3736 | } |
3737 | |
3738 | /* Request Tx MSI irq */ |
3739 | for (i = 0; i < priv->plat->tx_queues_to_use; i++) { |
3740 | if (i >= MTL_MAX_TX_QUEUES) |
3741 | break; |
3742 | if (priv->tx_irq[i] == 0) |
3743 | continue; |
3744 | |
3745 | int_name = priv->int_name_tx_irq[i]; |
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(priv->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->dma_conf.tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d MSI %d (error: %d)\n",
				   __func__, i, priv->tx_irq[i], ret);
3754 | irq_err = REQ_IRQ_ERR_TX; |
3755 | irq_idx = i; |
3756 | goto irq_error; |
3757 | } |
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3761 | } |
3762 | |
3763 | return 0; |
3764 | |
3765 | irq_error: |
3766 | stmmac_free_irq(dev, irq_err, irq_idx); |
3767 | return ret; |
3768 | } |
3769 | |
3770 | static int stmmac_request_irq_single(struct net_device *dev) |
3771 | { |
3772 | struct stmmac_priv *priv = netdev_priv(dev); |
3773 | enum request_irq_err irq_err; |
3774 | int ret; |
3775 | |
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
3782 | irq_err = REQ_IRQ_ERR_MAC; |
3783 | goto irq_error; |
3784 | } |
3785 | |
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
3796 | irq_err = REQ_IRQ_ERR_WOL; |
3797 | goto irq_error; |
3798 | } |
3799 | } |
3800 | |
	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
3809 | irq_err = REQ_IRQ_ERR_LPI; |
3810 | goto irq_error; |
3811 | } |
3812 | } |
3813 | |
	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case a separate line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
				   __func__, priv->sfty_irq, ret);
3824 | irq_err = REQ_IRQ_ERR_SFTY; |
3825 | goto irq_error; |
3826 | } |
3827 | } |
3828 | |
3829 | return 0; |
3830 | |
3831 | irq_error: |
	stmmac_free_irq(dev, irq_err, 0);
3833 | return ret; |
3834 | } |
3835 | |
3836 | static int stmmac_request_irq(struct net_device *dev) |
3837 | { |
3838 | struct stmmac_priv *priv = netdev_priv(dev); |
3839 | int ret; |
3840 | |
3841 | /* Request the IRQ lines */ |
3842 | if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) |
3843 | ret = stmmac_request_irq_multi_msi(dev); |
3844 | else |
3845 | ret = stmmac_request_irq_single(dev); |
3846 | |
3847 | return ret; |
3848 | } |
3849 | |
3850 | /** |
3851 | * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue |
3852 | * @priv: driver private structure |
3853 | * @mtu: MTU to setup the dma queue and buf with |
3854 | * Description: Allocate and generate a dma_conf based on the provided MTU. |
3855 | * Allocate the Tx/Rx DMA queue and init them. |
3856 | * Return value: |
3857 | * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. |
3858 | */ |
3859 | static struct stmmac_dma_conf * |
3860 | stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) |
3861 | { |
3862 | struct stmmac_dma_conf *dma_conf; |
3863 | int chan, bfsize, ret; |
3864 | |
	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
	if (!dma_conf) {
		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
			   __func__);
		return ERR_PTR(-ENOMEM);
3870 | } |
3871 | |
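	/* Only ring-mode setups that support 16KiB buffers report a size
	 * here (and only for MTUs above 8KiB); everything else falls back
	 * to the standard buffer size derived from the MTU.
	 */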
3872 | bfsize = stmmac_set_16kib_bfsize(priv, mtu); |
3873 | if (bfsize < 0) |
3874 | bfsize = 0; |
3875 | |
3876 | if (bfsize < BUF_SIZE_16KiB) |
		bfsize = stmmac_set_bfsize(mtu, 0);
3878 | |
3879 | dma_conf->dma_buf_sz = bfsize; |
	/* Choose the tx/rx ring sizes from the ones already defined in the
	 * priv struct, if any.
	 */
3883 | dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; |
3884 | dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; |
3885 | |
3886 | if (!dma_conf->dma_tx_size) |
3887 | dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; |
3888 | if (!dma_conf->dma_rx_size) |
3889 | dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; |
3890 | |
3891 | /* Earlier check for TBS */ |
3892 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { |
3893 | struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; |
3894 | int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; |
3895 | |
3896 | /* Setup per-TXQ tbs flag before TX descriptor alloc */ |
3897 | tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; |
3898 | } |
3899 | |
3900 | ret = alloc_dma_desc_resources(priv, dma_conf); |
3901 | if (ret < 0) { |
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3903 | __func__); |
3904 | goto alloc_error; |
3905 | } |
3906 | |
	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3910 | __func__); |
3911 | goto init_error; |
3912 | } |
3913 | |
3914 | return dma_conf; |
3915 | |
3916 | init_error: |
3917 | free_dma_desc_resources(priv, dma_conf); |
3918 | alloc_error: |
	kfree(dma_conf);
	return ERR_PTR(ret);
3921 | } |
3922 | |
3923 | /** |
3924 | * __stmmac_open - open entry point of the driver |
3925 | * @dev : pointer to the device structure. |
3926 | * @dma_conf : structure to take the dma data |
3927 | * Description: |
3928 | * This function is the open entry point of the driver. |
3929 | * Return value: |
3930 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
3931 | * file on failure. |
3932 | */ |
3933 | static int __stmmac_open(struct net_device *dev, |
3934 | struct stmmac_dma_conf *dma_conf) |
3935 | { |
3936 | struct stmmac_priv *priv = netdev_priv(dev); |
3937 | int mode = priv->plat->phy_interface; |
3938 | u32 chan; |
3939 | int ret; |
3940 | |
	ret = pm_runtime_resume_and_get(priv->device);
3942 | if (ret < 0) |
3943 | return ret; |
3944 | |
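	/* Attach a PHY only when no PCS manages the link on its own, i.e.
	 * no TBI/RTBI PCS, no XPCS running Clause 73 autoneg and no Lynx
	 * PCS handle is present.
	 */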
3945 | if (priv->hw->pcs != STMMAC_PCS_TBI && |
3946 | priv->hw->pcs != STMMAC_PCS_RTBI && |
3947 | (!priv->hw->xpcs || |
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3949 | !priv->hw->lynx_pcs) { |
3950 | ret = stmmac_init_phy(dev); |
3951 | if (ret) { |
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
3954 | __func__, ret); |
3955 | goto init_phy_error; |
3956 | } |
3957 | } |
3958 | |
3959 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
3960 | |
3961 | buf_sz = dma_conf->dma_buf_sz; |
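
	/* Preserve any per-queue TBS-enable state (set at runtime, e.g. by
	 * the etf qdisc offload) before the new dma_conf overwrites
	 * priv->dma_conf below.
	 */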
3962 | for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) |
3963 | if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) |
3964 | dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; |
3965 | memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); |
3966 | |
3967 | stmmac_reset_queues_param(priv); |
3968 | |
3969 | if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && |
3970 | priv->plat->serdes_powerup) { |
3971 | ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); |
3972 | if (ret < 0) { |
			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3974 | __func__); |
3975 | goto init_error; |
3976 | } |
3977 | } |
3978 | |
	ret = stmmac_hw_setup(dev, true);
3980 | if (ret < 0) { |
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3982 | goto init_error; |
3983 | } |
3984 | |
3985 | stmmac_init_coalesce(priv); |
3986 | |
3987 | phylink_start(priv->phylink); |
3988 | /* We may have called phylink_speed_down before */ |
	phylink_speed_up(priv->phylink);
3990 | |
3991 | ret = stmmac_request_irq(dev); |
3992 | if (ret) |
3993 | goto irq_error; |
3994 | |
3995 | stmmac_enable_all_queues(priv); |
	netif_tx_start_all_queues(priv->dev);
3997 | stmmac_enable_all_dma_irq(priv); |
3998 | |
3999 | return 0; |
4000 | |
4001 | irq_error: |
4002 | phylink_stop(priv->phylink); |
4003 | |
4004 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4006 | |
4007 | stmmac_hw_teardown(dev); |
4008 | init_error: |
4009 | phylink_disconnect_phy(priv->phylink); |
4010 | init_phy_error: |
	pm_runtime_put(priv->device);
4012 | return ret; |
4013 | } |
4014 | |
4015 | static int stmmac_open(struct net_device *dev) |
4016 | { |
4017 | struct stmmac_priv *priv = netdev_priv(dev); |
4018 | struct stmmac_dma_conf *dma_conf; |
4019 | int ret; |
4020 | |
	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);
4024 | |
4025 | ret = __stmmac_open(dev, dma_conf); |
4026 | if (ret) |
4027 | free_dma_desc_resources(priv, dma_conf); |
4028 | |
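	/* __stmmac_open() copied *dma_conf into priv->dma_conf, so only the
	 * temporary container is freed here; on failure the queue resources
	 * were already released above.
	 */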
	kfree(dma_conf);
4030 | return ret; |
4031 | } |
4032 | |
4033 | static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) |
4034 | { |
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq) {
		destroy_workqueue(priv->fpe_wq);
		priv->fpe_wq = NULL;
	}

	netdev_info(priv->dev, "FPE workqueue stop");
4043 | } |
4044 | |
4045 | /** |
4046 | * stmmac_release - close entry point of the driver |
4047 | * @dev : device pointer. |
4048 | * Description: |
4049 | * This is the stop entry point of the driver. |
4050 | */ |
4051 | static int stmmac_release(struct net_device *dev) |
4052 | { |
4053 | struct stmmac_priv *priv = netdev_priv(dev); |
4054 | u32 chan; |
4055 | |
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
4058 | /* Stop and disconnect the PHY */ |
4059 | phylink_stop(priv->phylink); |
4060 | phylink_disconnect_phy(priv->phylink); |
4061 | |
4062 | stmmac_disable_all_queues(priv); |
4063 | |
4064 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4066 | |
4067 | netif_tx_disable(dev); |
4068 | |
4069 | /* Free the IRQ lines */ |
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4071 | |
4072 | if (priv->eee_enabled) { |
4073 | priv->tx_path_in_lpi_mode = false; |
		del_timer_sync(&priv->eee_ctrl_timer);
4075 | } |
4076 | |
4077 | /* Stop TX/RX DMA and clear the descriptors */ |
4078 | stmmac_stop_all_dma(priv); |
4079 | |
4080 | /* Release and free the Rx/Tx resources */ |
	free_dma_desc_resources(priv, &priv->dma_conf);
4082 | |
4083 | /* Disable the MAC Rx/Tx */ |
4084 | stmmac_mac_set(priv, priv->ioaddr, false); |
4085 | |
	/* Power down the SerDes if one is present */
4087 | if (priv->plat->serdes_powerdown) |
4088 | priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); |
4089 | |
4090 | netif_carrier_off(dev); |
4091 | |
4092 | stmmac_release_ptp(priv); |
4093 | |
	pm_runtime_put(priv->device);
4095 | |
4096 | if (priv->dma_cap.fpesel) |
4097 | stmmac_fpe_stop_wq(priv); |
4098 | |
4099 | return 0; |
4100 | } |
4101 | |
4102 | static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, |
4103 | struct stmmac_tx_queue *tx_q) |
4104 | { |
4105 | u16 tag = 0x0, inner_tag = 0x0; |
4106 | u32 inner_type = 0x0; |
4107 | struct dma_desc *p; |
4108 | |
4109 | if (!priv->dma_cap.vlins) |
4110 | return false; |
4111 | if (!skb_vlan_tag_present(skb)) |
4112 | return false; |
4113 | if (skb->vlan_proto == htons(ETH_P_8021AD)) { |
4114 | inner_tag = skb_vlan_tag_get(skb); |
4115 | inner_type = STMMAC_VLAN_INSERT; |
4116 | } |
4117 | |
4118 | tag = skb_vlan_tag_get(skb); |
4119 | |
4120 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4121 | p = &tx_q->dma_entx[tx_q->cur_tx].basic; |
4122 | else |
4123 | p = &tx_q->dma_tx[tx_q->cur_tx]; |
4124 | |
4125 | if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) |
4126 | return false; |
4127 | |
4128 | stmmac_set_tx_owner(priv, p); |
4129 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); |
4130 | return true; |
4131 | } |
4132 | |
4133 | /** |
 * stmmac_tso_allocator - allocate and fill TSO Tx descriptors
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptors, taking new ones as needed, until
 * total_len bytes of the buffer have been covered.
4143 | */ |
4144 | static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, |
4145 | int total_len, bool last_segment, u32 queue) |
4146 | { |
4147 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
4148 | struct dma_desc *desc; |
4149 | u32 buff_size; |
4150 | int tmp_len; |
4151 | |
4152 | tmp_len = total_len; |
4153 | |
4154 | while (tmp_len > 0) { |
4155 | dma_addr_t curr_addr; |
4156 | |
4157 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
4158 | priv->dma_conf.dma_tx_size); |
4159 | WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
4160 | |
4161 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4162 | desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
4163 | else |
4164 | desc = &tx_q->dma_tx[tx_q->cur_tx]; |
4165 | |
4166 | curr_addr = des + (total_len - tmp_len); |
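		/* With 32-bit descriptor addressing only des0 carries the
		 * buffer address; wider configurations program the low/high
		 * halves through des0/des1 in stmmac_set_desc_addr().
		 */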
4167 | if (priv->dma_cap.addr64 <= 32) |
4168 | desc->des0 = cpu_to_le32(curr_addr); |
4169 | else |
4170 | stmmac_set_desc_addr(priv, desc, curr_addr); |
4171 | |
4172 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
4173 | TSO_MAX_BUFF_SIZE : tmp_len; |
4174 | |
4175 | stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, |
4176 | 0, 1, |
4177 | (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
4178 | 0, 0); |
4179 | |
4180 | tmp_len -= TSO_MAX_BUFF_SIZE; |
4181 | } |
4182 | } |
4183 | |
4184 | static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) |
4185 | { |
4186 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
4187 | int desc_size; |
4188 | |
4189 | if (likely(priv->extend_desc)) |
4190 | desc_size = sizeof(struct dma_extended_desc); |
4191 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4192 | desc_size = sizeof(struct dma_edesc); |
4193 | else |
4194 | desc_size = sizeof(struct dma_desc); |
4195 | |
4196 | /* The own bit must be the latest setting done when prepare the |
4197 | * descriptor and then barrier is needed to make sure that |
4198 | * all is coherent before granting the DMA engine. |
4199 | */ |
4200 | wmb(); |
4201 | |
4202 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); |
4203 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
4204 | } |
4205 | |
4206 | /** |
4207 | * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) |
4208 | * @skb : the socket buffer |
4209 | * @dev : device pointer |
4210 | * Description: this is the transmit function that is called on TSO frames |
4211 | * (support available on GMAC4 and newer chips). |
4212 | * Diagram below show the ring programming in case of TSO frames: |
4213 | * |
4214 | * First Descriptor |
4215 | * -------- |
4216 | * | DES0 |---> buffer1 = L2/L3/L4 header |
4217 | * | DES1 |---> TCP Payload (can continue on next descr...) |
4218 | * | DES2 |---> buffer 1 and 2 len |
4219 | * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] |
4220 | * -------- |
4221 | * | |
4222 | * ... |
4223 | * | |
4224 | * -------- |
4225 | * | DES0 | --| Split TCP Payload on Buffers 1 and 2 |
4226 | * | DES1 | --| |
4227 | * | DES2 | --> buffer 1 and 2 len |
4228 | * | DES3 | |
4229 | * -------- |
4230 | * |
 * The MSS is fixed for the whole frame, so the TDES3 context field only
 * needs to be programmed when the MSS actually changes.
4232 | */ |
4233 | static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) |
4234 | { |
4235 | struct dma_desc *desc, *first, *mss_desc = NULL; |
4236 | struct stmmac_priv *priv = netdev_priv(dev); |
4237 | int nfrags = skb_shinfo(skb)->nr_frags; |
4238 | u32 queue = skb_get_queue_mapping(skb); |
4239 | unsigned int first_entry, tx_packets; |
4240 | struct stmmac_txq_stats *txq_stats; |
4241 | int tmp_pay_len = 0, first_tx; |
4242 | struct stmmac_tx_queue *tx_q; |
4243 | bool has_vlan, set_ic; |
4244 | u8 proto_hdr_len, hdr; |
4245 | u32 pay_len, mss; |
4246 | dma_addr_t des; |
4247 | int i; |
4248 | |
4249 | tx_q = &priv->dma_conf.tx_queue[queue]; |
4250 | txq_stats = &priv->xstats.txq_stats[queue]; |
4251 | first_tx = tx_q->cur_tx; |
4252 | |
4253 | /* Compute header lengths */ |
4254 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
4255 | proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); |
4256 | hdr = sizeof(struct udphdr); |
4257 | } else { |
4258 | proto_hdr_len = skb_tcp_all_headers(skb); |
4259 | hdr = tcp_hdrlen(skb); |
4260 | } |
4261 | |
4262 | /* Desc availability based on threshold should be enough safe */ |
4263 | if (unlikely(stmmac_tx_avail(priv, queue) < |
4264 | (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { |
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
4272 | } |
4273 | return NETDEV_TX_BUSY; |
4274 | } |
4275 | |
4276 | pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ |
4277 | |
4278 | mss = skb_shinfo(skb)->gso_size; |
4279 | |
4280 | /* set new MSS value if needed */ |
4281 | if (mss != tx_q->mss) { |
4282 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4283 | mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
4284 | else |
4285 | mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; |
4286 | |
4287 | stmmac_set_mss(priv, mss_desc, mss); |
4288 | tx_q->mss = mss; |
4289 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, |
4290 | priv->dma_conf.dma_tx_size); |
4291 | WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); |
4292 | } |
4293 | |
	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}
4300 | |
4301 | /* Check if VLAN can be inserted by HW */ |
4302 | has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
4303 | |
4304 | first_entry = tx_q->cur_tx; |
4305 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
4306 | |
4307 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4308 | desc = &tx_q->dma_entx[first_entry].basic; |
4309 | else |
4310 | desc = &tx_q->dma_tx[first_entry]; |
4311 | first = desc; |
4312 | |
4313 | if (has_vlan) |
4314 | stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
4315 | |
4316 | /* first descriptor: fill Headers on Buf1 */ |
4317 | des = dma_map_single(priv->device, skb->data, skb_headlen(skb), |
4318 | DMA_TO_DEVICE); |
	if (dma_mapping_error(priv->device, des))
4320 | goto dma_map_err; |
4321 | |
4322 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
4323 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
4324 | tx_q->tx_skbuff_dma[first_entry].map_as_page = false; |
4325 | tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; |
4326 | |
4327 | if (priv->dma_cap.addr64 <= 32) { |
4328 | first->des0 = cpu_to_le32(des); |
4329 | |
4330 | /* Fill start of payload in buff2 of first descriptor */ |
4331 | if (pay_len) |
4332 | first->des1 = cpu_to_le32(des + proto_hdr_len); |
4333 | |
4334 | /* If needed take extra descriptors to fill the remaining payload */ |
4335 | tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; |
4336 | } else { |
4337 | stmmac_set_desc_addr(priv, first, des); |
4338 | tmp_pay_len = pay_len; |
4339 | des += proto_hdr_len; |
4340 | pay_len = 0; |
4341 | } |
4342 | |
	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4344 | |
4345 | /* Prepare fragments */ |
4346 | for (i = 0; i < nfrags; i++) { |
4347 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4348 | |
		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);
4357 | |
4358 | tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; |
4359 | tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); |
4360 | tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; |
4361 | tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; |
4362 | } |
4363 | |
4364 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
4365 | |
4366 | /* Only the last descriptor gets to point to the skb. */ |
4367 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; |
4368 | tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; |
4369 | |
4370 | /* Manage tx mitigation */ |
4371 | tx_packets = (tx_q->cur_tx + 1) - first_tx; |
4372 | tx_q->tx_count_frames += tx_packets; |
4373 | |
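	/* Force a completion interrupt when a Tx timestamp is pending and
	 * otherwise request one roughly every tx_coal_frames descriptors:
	 * the modulo test fires whenever this batch made tx_count_frames
	 * cross a multiple of the coalescing threshold.
	 */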
4374 | if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
4375 | set_ic = true; |
4376 | else if (!priv->tx_coal_frames[queue]) |
4377 | set_ic = false; |
4378 | else if (tx_packets > priv->tx_coal_frames[queue]) |
4379 | set_ic = true; |
4380 | else if ((tx_q->tx_count_frames % |
4381 | priv->tx_coal_frames[queue]) < tx_packets) |
4382 | set_ic = true; |
4383 | else |
4384 | set_ic = false; |
4385 | |
4386 | if (set_ic) { |
4387 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4388 | desc = &tx_q->dma_entx[tx_q->cur_tx].basic; |
4389 | else |
4390 | desc = &tx_q->dma_tx[tx_q->cur_tx]; |
4391 | |
4392 | tx_q->tx_count_frames = 0; |
4393 | stmmac_set_tx_ic(priv, desc); |
4394 | } |
4395 | |
4396 | /* We've used all descriptors we need for this skb, however, |
4397 | * advance cur_tx so that it references a fresh descriptor. |
4398 | * ndo_start_xmit will fill this descriptor the next time it's |
4399 | * called and stmmac_tx_clean may clean up to this descriptor. |
4400 | */ |
4401 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); |
4402 | |
4403 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4407 | } |
4408 | |
	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	u64_stats_inc(&txq_stats->q.tx_tso_frames);
	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);
4416 | |
4417 | if (priv->sarc_type) |
4418 | stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
4419 | |
4420 | skb_tx_timestamp(skb); |
4421 | |
4422 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
4423 | priv->hwts_tx_en)) { |
4424 | /* declare that device is doing timestamping */ |
4425 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
4426 | stmmac_enable_tx_timestamp(priv, first); |
4427 | } |
4428 | |
4429 | /* Complete the first descriptor before granting the DMA */ |
4430 | stmmac_prepare_tso_tx_desc(priv, first, 1, |
4431 | proto_hdr_len, |
4432 | pay_len, |
4433 | 1, tx_q->tx_skbuff_dma[first_entry].last_segment, |
4434 | hdr / 4, (skb->len - proto_hdr_len)); |
4435 | |
4436 | /* If context desc is used to change MSS */ |
4437 | if (mss_desc) { |
4438 | /* Make sure that first descriptor has been completely |
4439 | * written, including its own bit. This is because MSS is |
4440 | * actually before first descriptor, so we need to make |
4441 | * sure that MSS's own bit is the last thing written. |
4442 | */ |
4443 | dma_wmb(); |
4444 | stmmac_set_tx_owner(priv, mss_desc); |
4445 | } |
4446 | |
	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}
4454 | |
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4456 | |
4457 | stmmac_flush_tx_descriptors(priv, queue); |
4458 | stmmac_tx_timer_arm(priv, queue); |
4459 | |
4460 | return NETDEV_TX_OK; |
4461 | |
4462 | dma_map_err: |
	dev_err(priv->device, "Tx dma map failed\n");
4464 | dev_kfree_skb(skb); |
4465 | priv->xstats.tx_dropped++; |
4466 | return NETDEV_TX_OK; |
4467 | } |
4468 | |
4469 | /** |
4470 | * stmmac_has_ip_ethertype() - Check if packet has IP ethertype |
4471 | * @skb: socket buffer to check |
4472 | * |
4473 | * Check if a packet has an ethertype that will trigger the IP header checks |
4474 | * and IP/TCP checksum engine of the stmmac core. |
4475 | * |
4476 | * Return: true if the ethertype can trigger the checksum engine, false |
4477 | * otherwise |
4478 | */ |
4479 | static bool stmmac_has_ip_ethertype(struct sk_buff *skb) |
4480 | { |
4481 | int depth = 0; |
4482 | __be16 proto; |
4483 | |
	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
				    &depth);
4486 | |
4487 | return (depth <= ETH_HLEN) && |
4488 | (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6)); |
4489 | } |
4490 | |
4491 | /** |
4492 | * stmmac_xmit - Tx entry point of the driver |
4493 | * @skb : the socket buffer |
4494 | * @dev : device pointer |
4495 | * Description : this is the tx entry point of the driver. |
4496 | * It programs the chain or the ring and supports oversized frames |
4497 | * and SG feature. |
4498 | */ |
4499 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) |
4500 | { |
4501 | unsigned int first_entry, tx_packets, enh_desc; |
4502 | struct stmmac_priv *priv = netdev_priv(dev); |
4503 | unsigned int nopaged_len = skb_headlen(skb); |
4504 | int i, csum_insertion = 0, is_jumbo = 0; |
4505 | u32 queue = skb_get_queue_mapping(skb); |
4506 | int nfrags = skb_shinfo(skb)->nr_frags; |
4507 | int gso = skb_shinfo(skb)->gso_type; |
4508 | struct stmmac_txq_stats *txq_stats; |
4509 | struct dma_edesc *tbs_desc = NULL; |
4510 | struct dma_desc *desc, *first; |
4511 | struct stmmac_tx_queue *tx_q; |
4512 | bool has_vlan, set_ic; |
4513 | int entry, first_tx; |
4514 | dma_addr_t des; |
4515 | |
4516 | tx_q = &priv->dma_conf.tx_queue[queue]; |
4517 | txq_stats = &priv->xstats.txq_stats[queue]; |
4518 | first_tx = tx_q->cur_tx; |
4519 | |
4520 | if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) |
4521 | stmmac_disable_eee_mode(priv); |
4522 | |
4523 | /* Manage oversized TCP frames for GMAC4 device */ |
4524 | if (skb_is_gso(skb) && priv->tso) { |
4525 | if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) |
4526 | return stmmac_tso_xmit(skb, dev); |
4527 | if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) |
4528 | return stmmac_tso_xmit(skb, dev); |
4529 | } |
4530 | |
	if (priv->plat->est && priv->plat->est->enable &&
	    priv->plat->est->max_sdu[queue] &&
	    skb->len > priv->plat->est->max_sdu[queue]) {
4534 | priv->xstats.max_sdu_txq_drop[queue]++; |
4535 | goto max_sdu_err; |
4536 | } |
4537 | |
4538 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
4546 | } |
4547 | return NETDEV_TX_BUSY; |
4548 | } |
4549 | |
4550 | /* Check if VLAN can be inserted by HW */ |
4551 | has_vlan = stmmac_vlan_insert(priv, skb, tx_q); |
4552 | |
4553 | entry = tx_q->cur_tx; |
4554 | first_entry = entry; |
4555 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
4556 | |
4557 | csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); |
4558 | /* DWMAC IPs can be synthesized to support tx coe only for a few tx |
4559 | * queues. In that case, checksum offloading for those queues that don't |
4560 | * support tx coe needs to fallback to software checksum calculation. |
4561 | * |
4562 | * Packets that won't trigger the COE e.g. most DSA-tagged packets will |
4563 | * also have to be checksummed in software. |
4564 | */ |
4565 | if (csum_insertion && |
4566 | (priv->plat->tx_queues_cfg[queue].coe_unsupported || |
4567 | !stmmac_has_ip_ethertype(skb))) { |
4568 | if (unlikely(skb_checksum_help(skb))) |
4569 | goto dma_map_err; |
4570 | csum_insertion = !csum_insertion; |
4571 | } |
4572 | |
4573 | if (likely(priv->extend_desc)) |
4574 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
4575 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4576 | desc = &tx_q->dma_entx[entry].basic; |
4577 | else |
4578 | desc = tx_q->dma_tx + entry; |
4579 | |
4580 | first = desc; |
4581 | |
4582 | if (has_vlan) |
4583 | stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); |
4584 | |
4585 | enh_desc = priv->plat->enh_desc; |
4586 | /* To program the descriptors according to the size of the frame */ |
4587 | if (enh_desc) |
4588 | is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); |
4589 | |
4590 | if (unlikely(is_jumbo)) { |
4591 | entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); |
4592 | if (unlikely(entry < 0) && (entry != -EINVAL)) |
4593 | goto dma_map_err; |
4594 | } |
4595 | |
4596 | for (i = 0; i < nfrags; i++) { |
4597 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4598 | int len = skb_frag_size(frag); |
4599 | bool last_segment = (i == (nfrags - 1)); |
4600 | |
4601 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); |
4602 | WARN_ON(tx_q->tx_skbuff[entry]); |
4603 | |
4604 | if (likely(priv->extend_desc)) |
4605 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
4606 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4607 | desc = &tx_q->dma_entx[entry].basic; |
4608 | else |
4609 | desc = tx_q->dma_tx + entry; |
4610 | |
		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */
4615 | |
4616 | tx_q->tx_skbuff_dma[entry].buf = des; |
4617 | |
4618 | stmmac_set_desc_addr(priv, desc, des); |
4619 | |
4620 | tx_q->tx_skbuff_dma[entry].map_as_page = true; |
4621 | tx_q->tx_skbuff_dma[entry].len = len; |
4622 | tx_q->tx_skbuff_dma[entry].last_segment = last_segment; |
4623 | tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; |
4624 | |
4625 | /* Prepare the descriptor and set the own bit too */ |
4626 | stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, |
4627 | priv->mode, 1, last_segment, skb->len); |
4628 | } |
4629 | |
4630 | /* Only the last descriptor gets to point to the skb. */ |
4631 | tx_q->tx_skbuff[entry] = skb; |
4632 | tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; |
4633 | |
4634 | /* According to the coalesce parameter the IC bit for the latest |
4635 | * segment is reset and the timer re-started to clean the tx status. |
4636 | * This approach takes care about the fragments: desc is the first |
4637 | * element in case of no SG. |
4638 | */ |
4639 | tx_packets = (entry + 1) - first_tx; |
4640 | tx_q->tx_count_frames += tx_packets; |
4641 | |
4642 | if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) |
4643 | set_ic = true; |
4644 | else if (!priv->tx_coal_frames[queue]) |
4645 | set_ic = false; |
4646 | else if (tx_packets > priv->tx_coal_frames[queue]) |
4647 | set_ic = true; |
4648 | else if ((tx_q->tx_count_frames % |
4649 | priv->tx_coal_frames[queue]) < tx_packets) |
4650 | set_ic = true; |
4651 | else |
4652 | set_ic = false; |
4653 | |
4654 | if (set_ic) { |
4655 | if (likely(priv->extend_desc)) |
4656 | desc = &tx_q->dma_etx[entry].basic; |
4657 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4658 | desc = &tx_q->dma_entx[entry].basic; |
4659 | else |
4660 | desc = &tx_q->dma_tx[entry]; |
4661 | |
4662 | tx_q->tx_count_frames = 0; |
4663 | stmmac_set_tx_ic(priv, desc); |
4664 | } |
4665 | |
4666 | /* We've used all descriptors we need for this skb, however, |
4667 | * advance cur_tx so that it references a fresh descriptor. |
4668 | * ndo_start_xmit will fill this descriptor the next time it's |
4669 | * called and stmmac_tx_clean may clean up to this descriptor. |
4670 | */ |
4671 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); |
4672 | tx_q->cur_tx = entry; |
4673 | |
	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
4683 | |
4684 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4688 | } |
4689 | |
	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);
4695 | |
4696 | if (priv->sarc_type) |
4697 | stmmac_set_desc_sarc(priv, first, priv->sarc_type); |
4698 | |
4699 | skb_tx_timestamp(skb); |
4700 | |
4701 | /* Ready to fill the first descriptor and set the OWN bit w/o any |
4702 | * problems because all the descriptors are actually ready to be |
4703 | * passed to the DMA engine. |
4704 | */ |
4705 | if (likely(!is_jumbo)) { |
4706 | bool last_segment = (nfrags == 0); |
4707 | |
4708 | des = dma_map_single(priv->device, skb->data, |
4709 | nopaged_len, DMA_TO_DEVICE); |
		if (dma_mapping_error(priv->device, des))
4711 | goto dma_map_err; |
4712 | |
4713 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
4714 | tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; |
4715 | tx_q->tx_skbuff_dma[first_entry].map_as_page = false; |
4716 | |
4717 | stmmac_set_desc_addr(priv, first, des); |
4718 | |
4719 | tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; |
4720 | tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; |
4721 | |
4722 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
4723 | priv->hwts_tx_en)) { |
4724 | /* declare that device is doing timestamping */ |
4725 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
4726 | stmmac_enable_tx_timestamp(priv, first); |
4727 | } |
4728 | |
4729 | /* Prepare the first descriptor setting the OWN bit too */ |
4730 | stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, |
4731 | csum_insertion, priv->mode, 0, last_segment, |
4732 | skb->len); |
4733 | } |
4734 | |
4735 | if (tx_q->tbs & STMMAC_TBS_EN) { |
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4737 | |
4738 | tbs_desc = &tx_q->dma_entx[first_entry]; |
4739 | stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); |
4740 | } |
4741 | |
4742 | stmmac_set_tx_owner(priv, first); |
4743 | |
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4745 | |
4746 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
4747 | |
4748 | stmmac_flush_tx_descriptors(priv, queue); |
4749 | stmmac_tx_timer_arm(priv, queue); |
4750 | |
4751 | return NETDEV_TX_OK; |
4752 | |
4753 | dma_map_err: |
	netdev_err(priv->dev, "Tx DMA map failed\n");
4755 | max_sdu_err: |
4756 | dev_kfree_skb(skb); |
4757 | priv->xstats.tx_dropped++; |
4758 | return NETDEV_TX_OK; |
4759 | } |
4760 | |
4761 | static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) |
4762 | { |
4763 | struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); |
4764 | __be16 vlan_proto = veth->h_vlan_proto; |
4765 | u16 vlanid; |
4766 | |
4767 | if ((vlan_proto == htons(ETH_P_8021Q) && |
4768 | dev->features & NETIF_F_HW_VLAN_CTAG_RX) || |
4769 | (vlan_proto == htons(ETH_P_8021AD) && |
4770 | dev->features & NETIF_F_HW_VLAN_STAG_RX)) { |
4771 | /* pop the vlan tag */ |
4772 | vlanid = ntohs(veth->h_vlan_TCI); |
4773 | memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); |
4774 | skb_pull(skb, VLAN_HLEN); |
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4776 | } |
4777 | } |
4778 | |
4779 | /** |
4780 | * stmmac_rx_refill - refill used skb preallocated buffers |
4781 | * @priv: driver private structure |
4782 | * @queue: RX queue index |
4783 | * Description : this is to reallocate the skb for the reception process |
4784 | * that is based on zero-copy. |
4785 | */ |
4786 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
4787 | { |
4788 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
4789 | int dirty = stmmac_rx_dirty(priv, queue); |
4790 | unsigned int entry = rx_q->dirty_rx; |
4791 | gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); |
4792 | |
4793 | if (priv->dma_cap.host_dma_width <= 32) |
4794 | gfp |= GFP_DMA32; |
4795 | |
4796 | while (dirty-- > 0) { |
4797 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; |
4798 | struct dma_desc *p; |
4799 | bool use_rx_wd; |
4800 | |
4801 | if (priv->extend_desc) |
4802 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
4803 | else |
4804 | p = rx_q->dma_rx + entry; |
4805 | |
		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4821 | |
4822 | stmmac_set_desc_addr(priv, p, buf->addr); |
4823 | if (priv->sph) |
4824 | stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); |
4825 | else |
4826 | stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); |
4827 | stmmac_refill_desc3(priv, rx_q, p); |
4828 | |
4829 | rx_q->rx_count_frames++; |
4830 | rx_q->rx_count_frames += priv->rx_coal_frames[queue]; |
4831 | if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) |
4832 | rx_q->rx_count_frames = 0; |
4833 | |
4834 | use_rx_wd = !priv->rx_coal_frames[queue]; |
4835 | use_rx_wd |= rx_q->rx_count_frames > 0; |
4836 | if (!priv->use_riwt) |
4837 | use_rx_wd = false; |
4838 | |
4839 | dma_wmb(); |
4840 | stmmac_set_rx_owner(priv, p, use_rx_wd); |
4841 | |
4842 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); |
4843 | } |
4844 | rx_q->dirty_rx = entry; |
4845 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
4846 | (rx_q->dirty_rx * sizeof(struct dma_desc)); |
4847 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); |
4848 | } |
4849 | |
4850 | static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, |
4851 | struct dma_desc *p, |
4852 | int status, unsigned int len) |
4853 | { |
4854 | unsigned int plen = 0, hlen = 0; |
4855 | int coe = priv->hw->rx_csum; |
4856 | |
4857 | /* Not first descriptor, buffer is always zero */ |
4858 | if (priv->sph && len) |
4859 | return 0; |
4860 | |
4861 | /* First descriptor, get split header length */ |
4862 | stmmac_get_rx_header_len(priv, p, &hlen); |
4863 | if (priv->sph && hlen) { |
4864 | priv->xstats.rx_split_hdr_pkt_n++; |
4865 | return hlen; |
4866 | } |
4867 | |
4868 | /* First descriptor, not last descriptor and not split header */ |
4869 | if (status & rx_not_ls) |
4870 | return priv->dma_conf.dma_buf_sz; |
4871 | |
4872 | plen = stmmac_get_rx_frame_len(priv, p, coe); |
4873 | |
4874 | /* First descriptor and last descriptor and not split header */ |
4875 | return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); |
4876 | } |
4877 | |
4878 | static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, |
4879 | struct dma_desc *p, |
4880 | int status, unsigned int len) |
4881 | { |
4882 | int coe = priv->hw->rx_csum; |
4883 | unsigned int plen = 0; |
4884 | |
4885 | /* Not split header, buffer is not available */ |
4886 | if (!priv->sph) |
4887 | return 0; |
4888 | |
4889 | /* Not last descriptor */ |
4890 | if (status & rx_not_ls) |
4891 | return priv->dma_conf.dma_buf_sz; |
4892 | |
4893 | plen = stmmac_get_rx_frame_len(priv, p, coe); |
4894 | |
4895 | /* Last descriptor */ |
4896 | return plen - len; |
4897 | } |
4898 | |
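/**
 * stmmac_xdp_xmit_xdpf - add an XDP frame to the TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * @xdpf: XDP frame to transmit
 * @dma_map: true for the ndo_xdp_xmit path (frame must be DMA-mapped here),
 *	     false for XDP_TX (page is already mapped by the page pool)
 * Description: fills one descriptor with the given XDP frame and advances
 * cur_tx. Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when no
 * descriptor is available or the frame exceeds the queue's max SDU.
 */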
4899 | static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, |
4900 | struct xdp_frame *xdpf, bool dma_map) |
4901 | { |
4902 | struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; |
4903 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
4904 | unsigned int entry = tx_q->cur_tx; |
4905 | struct dma_desc *tx_desc; |
4906 | dma_addr_t dma_addr; |
4907 | bool set_ic; |
4908 | |
4909 | if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) |
4910 | return STMMAC_XDP_CONSUMED; |
4911 | |
4912 | if (priv->plat->est && priv->plat->est->enable && |
4913 | priv->plat->est->max_sdu[queue] && |
4914 | xdpf->len > priv->plat->est->max_sdu[queue]) { |
4915 | priv->xstats.max_sdu_txq_drop[queue]++; |
4916 | return STMMAC_XDP_CONSUMED; |
4917 | } |
4918 | |
4919 | if (likely(priv->extend_desc)) |
4920 | tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
4921 | else if (tx_q->tbs & STMMAC_TBS_AVAIL) |
4922 | tx_desc = &tx_q->dma_entx[entry].basic; |
4923 | else |
4924 | tx_desc = tx_q->dma_tx + entry; |
4925 | |
4926 | if (dma_map) { |
4927 | dma_addr = dma_map_single(priv->device, xdpf->data, |
4928 | xdpf->len, DMA_TO_DEVICE); |
		if (dma_mapping_error(priv->device, dma_addr))
4930 | return STMMAC_XDP_CONSUMED; |
4931 | |
4932 | tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; |
4933 | } else { |
4934 | struct page *page = virt_to_page(xdpf->data); |
4935 | |
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);
4940 | |
4941 | tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; |
4942 | } |
4943 | |
4944 | tx_q->tx_skbuff_dma[entry].buf = dma_addr; |
4945 | tx_q->tx_skbuff_dma[entry].map_as_page = false; |
4946 | tx_q->tx_skbuff_dma[entry].len = xdpf->len; |
4947 | tx_q->tx_skbuff_dma[entry].last_segment = true; |
4948 | tx_q->tx_skbuff_dma[entry].is_jumbo = false; |
4949 | |
4950 | tx_q->xdpf[entry] = xdpf; |
4951 | |
4952 | stmmac_set_desc_addr(priv, tx_desc, dma_addr); |
4953 | |
4954 | stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, |
4955 | true, priv->mode, true, true, |
4956 | xdpf->len); |
4957 | |
4958 | tx_q->tx_count_frames++; |
4959 | |
4960 | if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) |
4961 | set_ic = true; |
4962 | else |
4963 | set_ic = false; |
4964 | |
4965 | if (set_ic) { |
4966 | tx_q->tx_count_frames = 0; |
4967 | stmmac_set_tx_ic(priv, tx_desc); |
		u64_stats_update_begin(&txq_stats->q_syncp);
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
		u64_stats_update_end(&txq_stats->q_syncp);
4971 | } |
4972 | |
4973 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
4974 | |
4975 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); |
4976 | tx_q->cur_tx = entry; |
4977 | |
4978 | return STMMAC_XDP_TX; |
4979 | } |
4980 | |
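/* Map the current CPU onto a TX queue index, wrapping around the number of
 * TX queues in use so XDP transmission always lands on a valid queue.
 */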
4981 | static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, |
4982 | int cpu) |
4983 | { |
4984 | int index = cpu; |
4985 | |
4986 | if (unlikely(index < 0)) |
4987 | index = 0; |
4988 | |
4989 | while (index >= priv->plat->tx_queues_to_use) |
4990 | index -= priv->plat->tx_queues_to_use; |
4991 | |
4992 | return index; |
4993 | } |
4994 | |
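/* Transmit an XDP buffer back out of the interface (XDP_TX verdict). The TX
 * queue is derived from the current CPU and the netdev queue lock is taken,
 * since this path shares the TX ring with the slow path.
 */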
4995 | static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, |
4996 | struct xdp_buff *xdp) |
4997 | { |
4998 | struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); |
4999 | int cpu = smp_processor_id(); |
5000 | struct netdev_queue *nq; |
5001 | int queue; |
5002 | int res; |
5003 | |
5004 | if (unlikely(!xdpf)) |
5005 | return STMMAC_XDP_CONSUMED; |
5006 | |
5007 | queue = stmmac_xdp_get_tx_queue(priv, cpu); |
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);
5019 | |
5020 | return res; |
5021 | } |
5022 | |
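/* Run the XDP program on one buffer and translate its action into
 * STMMAC_XDP_* result flags; aborted and unknown actions fall through
 * to drop.
 */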
5023 | static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, |
5024 | struct bpf_prog *prog, |
5025 | struct xdp_buff *xdp) |
5026 | { |
5027 | u32 act; |
5028 | int res; |
5029 | |
5030 | act = bpf_prog_run_xdp(prog, xdp); |
5031 | switch (act) { |
5032 | case XDP_PASS: |
5033 | res = STMMAC_XDP_PASS; |
5034 | break; |
5035 | case XDP_TX: |
5036 | res = stmmac_xdp_xmit_back(priv, xdp); |
5037 | break; |
5038 | case XDP_REDIRECT: |
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5040 | res = STMMAC_XDP_CONSUMED; |
5041 | else |
5042 | res = STMMAC_XDP_REDIRECT; |
5043 | break; |
5044 | default: |
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5046 | fallthrough; |
5047 | case XDP_ABORTED: |
		trace_xdp_exception(priv->dev, prog, act);
5049 | fallthrough; |
5050 | case XDP_DROP: |
5051 | res = STMMAC_XDP_CONSUMED; |
5052 | break; |
5053 | } |
5054 | |
5055 | return res; |
5056 | } |
5057 | |
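/* Wrapper around __stmmac_xdp_run_prog() for the non-zero-copy RX path.
 * The verdict is encoded as ERR_PTR(-res): XDP_PASS (0) yields a NULL
 * pointer, so the caller falls through to build an skb.
 */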
5058 | static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, |
5059 | struct xdp_buff *xdp) |
5060 | { |
5061 | struct bpf_prog *prog; |
5062 | int res; |
5063 | |
5064 | prog = READ_ONCE(priv->xdp_prog); |
5065 | if (!prog) { |
5066 | res = STMMAC_XDP_PASS; |
5067 | goto out; |
5068 | } |
5069 | |
5070 | res = __stmmac_xdp_run_prog(priv, prog, xdp); |
5071 | out: |
	return ERR_PTR(-res);
5073 | } |
5074 | |
5075 | static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, |
5076 | int xdp_status) |
5077 | { |
5078 | int cpu = smp_processor_id(); |
5079 | int queue; |
5080 | |
5081 | queue = stmmac_xdp_get_tx_queue(priv, cpu); |
5082 | |
5083 | if (xdp_status & STMMAC_XDP_TX) |
5084 | stmmac_tx_timer_arm(priv, queue); |
5085 | |
5086 | if (xdp_status & STMMAC_XDP_REDIRECT) |
5087 | xdp_do_flush(); |
5088 | } |
5089 | |
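/* Build an skb for the zero-copy RX path by allocating a fresh buffer and
 * copying the packet out of the XSK buffer, which must be handed back to
 * the pool.
 */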
5090 | static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, |
5091 | struct xdp_buff *xdp) |
5092 | { |
5093 | unsigned int metasize = xdp->data - xdp->data_meta; |
5094 | unsigned int datasize = xdp->data_end - xdp->data; |
5095 | struct sk_buff *skb; |
5096 | |
	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);
5107 | |
5108 | return skb; |
5109 | } |
5110 | |
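/* Turn a zero-copy XDP buffer into an skb and pass it up the stack,
 * applying RX timestamping, VLAN stripping, checksum status and RSS hash
 * on the way.
 */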
5111 | static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, |
5112 | struct dma_desc *p, struct dma_desc *np, |
5113 | struct xdp_buff *xdp) |
5114 | { |
5115 | struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; |
5116 | struct stmmac_channel *ch = &priv->channel[queue]; |
5117 | unsigned int len = xdp->data_end - xdp->data; |
5118 | enum pkt_hash_types hash_type; |
5119 | int coe = priv->hw->rx_csum; |
5120 | struct sk_buff *skb; |
5121 | u32 hash; |
5122 | |
5123 | skb = stmmac_construct_skb_zc(ch, xdp); |
5124 | if (!skb) { |
5125 | priv->xstats.rx_dropped++; |
5126 | return; |
5127 | } |
5128 | |
	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	if (priv->hw->hw_vlan_en)
		/* MAC level stripping. */
		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
	else
		/* Driver level stripping. */
		stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
	u64_stats_update_end(&rxq_stats->napi_syncp);
5153 | } |
5154 | |
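/* Refill the RX ring with buffers from the XSK pool, writing up to @budget
 * descriptors back to the hardware. Returns false if the pool ran out of
 * buffers before the budget was consumed.
 */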
5155 | static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) |
5156 | { |
5157 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
5158 | unsigned int entry = rx_q->dirty_rx; |
5159 | struct dma_desc *rx_desc = NULL; |
5160 | bool ret = true; |
5161 | |
5162 | budget = min(budget, stmmac_rx_dirty(priv, queue)); |
5163 | |
5164 | while (budget-- > 0 && entry != rx_q->cur_rx) { |
5165 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; |
5166 | dma_addr_t dma_addr; |
5167 | bool use_rx_wd; |
5168 | |
		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5183 | stmmac_set_desc_addr(priv, rx_desc, dma_addr); |
5184 | stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); |
5185 | stmmac_refill_desc3(priv, rx_q, rx_desc); |
5186 | |
5187 | rx_q->rx_count_frames++; |
5188 | rx_q->rx_count_frames += priv->rx_coal_frames[queue]; |
5189 | if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) |
5190 | rx_q->rx_count_frames = 0; |
5191 | |
5192 | use_rx_wd = !priv->rx_coal_frames[queue]; |
5193 | use_rx_wd |= rx_q->rx_count_frames > 0; |
5194 | if (!priv->use_riwt) |
5195 | use_rx_wd = false; |
5196 | |
5197 | dma_wmb(); |
5198 | stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); |
5199 | |
5200 | entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); |
5201 | } |
5202 | |
5203 | if (rx_desc) { |
5204 | rx_q->dirty_rx = entry; |
5205 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
5206 | (rx_q->dirty_rx * sizeof(struct dma_desc)); |
5207 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); |
5208 | } |
5209 | |
5210 | return ret; |
5211 | } |
5212 | |
5213 | static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) |
5214 | { |
5215 | /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used |
5216 | * to represent incoming packet, whereas cb field in the same structure |
5217 | * is used to store driver specific info. Thus, struct stmmac_xdp_buff |
5218 | * is laid on top of xdp and cb fields of struct xdp_buff_xsk. |
5219 | */ |
5220 | return (struct stmmac_xdp_buff *)xdp; |
5221 | } |
5222 | |
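/* Zero-copy (XSK) counterpart of stmmac_rx(): process up to @limit frames,
 * run the XDP program on each and refill the ring from the XSK pool in
 * batches. Frames must map 1:1 onto XSK buffers, so multi-descriptor
 * frames are dropped.
 */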
5223 | static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) |
5224 | { |
5225 | struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; |
5226 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
5227 | unsigned int count = 0, error = 0, len = 0; |
5228 | int dirty = stmmac_rx_dirty(priv, queue); |
5229 | unsigned int next_entry = rx_q->cur_rx; |
5230 | u32 rx_errors = 0, rx_dropped = 0; |
5231 | unsigned int desc_size; |
5232 | struct bpf_prog *prog; |
5233 | bool failure = false; |
5234 | int xdp_status = 0; |
5235 | int status = 0; |
5236 | |
5237 | if (netif_msg_rx_status(priv)) { |
5238 | void *rx_head; |
5239 | |
		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5241 | if (priv->extend_desc) { |
5242 | rx_head = (void *)rx_q->dma_erx; |
5243 | desc_size = sizeof(struct dma_extended_desc); |
5244 | } else { |
5245 | rx_head = (void *)rx_q->dma_rx; |
5246 | desc_size = sizeof(struct dma_desc); |
5247 | } |
5248 | |
5249 | stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, |
5250 | rx_q->dma_rx_phy, desc_size); |
5251 | } |
5252 | while (count < limit) { |
5253 | struct stmmac_rx_buffer *buf; |
5254 | struct stmmac_xdp_buff *ctx; |
5255 | unsigned int buf1_len = 0; |
5256 | struct dma_desc *np, *p; |
5257 | int entry; |
5258 | int res; |
5259 | |
5260 | if (!count && rx_q->state_saved) { |
5261 | error = rx_q->state.error; |
5262 | len = rx_q->state.len; |
5263 | } else { |
5264 | rx_q->state_saved = false; |
5265 | error = 0; |
5266 | len = 0; |
5267 | } |
5268 | |
5269 | if (count >= limit) |
5270 | break; |
5271 | |
5272 | read_again: |
5273 | buf1_len = 0; |
5274 | entry = next_entry; |
5275 | buf = &rx_q->buf_pool[entry]; |
5276 | |
5277 | if (dirty >= STMMAC_RX_FILL_BATCH) { |
5278 | failure = failure || |
				  !stmmac_rx_refill_zc(priv, queue, dirty);
5280 | dirty = 0; |
5281 | } |
5282 | |
5283 | if (priv->extend_desc) |
5284 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
5285 | else |
5286 | p = rx_q->dma_rx + entry; |
5287 | |
5288 | /* read the status of the incoming frame */ |
5289 | status = stmmac_rx_status(priv, &priv->xstats, p); |
5290 | /* check if managed by the DMA otherwise go ahead */ |
5291 | if (unlikely(status & dma_own)) |
5292 | break; |
5293 | |
5294 | /* Prefetch the next RX descriptor */ |
5295 | rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, |
5296 | priv->dma_conf.dma_rx_size); |
5297 | next_entry = rx_q->cur_rx; |
5298 | |
5299 | if (priv->extend_desc) |
5300 | np = (struct dma_desc *)(rx_q->dma_erx + next_entry); |
5301 | else |
5302 | np = rx_q->dma_rx + next_entry; |
5303 | |
5304 | prefetch(np); |
5305 | |
		/* Ensure a valid XSK buffer before proceeding */
5307 | if (!buf->xdp) |
5308 | break; |
5309 | |
5310 | if (priv->extend_desc) |
5311 | stmmac_rx_extended_status(priv, &priv->xstats, |
5312 | rx_q->dma_erx + entry); |
5313 | if (unlikely(status == discard_frame)) { |
			xsk_buff_free(buf->xdp);
5315 | buf->xdp = NULL; |
5316 | dirty++; |
5317 | error = 1; |
5318 | if (!priv->hwts_rx_en) |
5319 | rx_errors++; |
5320 | } |
5321 | |
5322 | if (unlikely(error && (status & rx_not_ls))) |
5323 | goto read_again; |
5324 | if (unlikely(error)) { |
5325 | count++; |
5326 | continue; |
5327 | } |
5328 | |
5329 | /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ |
5330 | if (likely(status & rx_not_ls)) { |
			xsk_buff_free(buf->xdp);
5332 | buf->xdp = NULL; |
5333 | dirty++; |
5334 | count++; |
5335 | goto read_again; |
5336 | } |
5337 | |
		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5339 | ctx->priv = priv; |
5340 | ctx->desc = p; |
5341 | ctx->ndesc = np; |
5342 | |
		/* XDP ZC frames only support primary buffers for now */
5344 | buf1_len = stmmac_rx_buf1_len(priv, p, status, len); |
5345 | len += buf1_len; |
5346 | |
5347 | /* ACS is disabled; strip manually. */ |
5348 | if (likely(!(status & rx_not_ls))) { |
5349 | buf1_len -= ETH_FCS_LEN; |
5350 | len -= ETH_FCS_LEN; |
5351 | } |
5352 | |
5353 | /* RX buffer is good and fit into a XSK pool buffer */ |
5354 | buf->xdp->data_end = buf->xdp->data + buf1_len; |
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5359 | |
5360 | switch (res) { |
5361 | case STMMAC_XDP_PASS: |
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
5367 | rx_dropped++; |
5368 | break; |
5369 | case STMMAC_XDP_TX: |
5370 | case STMMAC_XDP_REDIRECT: |
5371 | xdp_status |= res; |
5372 | break; |
5373 | } |
5374 | |
5375 | buf->xdp = NULL; |
5376 | dirty++; |
5377 | count++; |
5378 | } |
5379 | |
5380 | if (status & rx_not_ls) { |
5381 | rx_q->state_saved = true; |
5382 | rx_q->state.error = error; |
5383 | rx_q->state.len = len; |
5384 | } |
5385 | |
5386 | stmmac_finalize_xdp_rx(priv, xdp_status); |
5387 | |
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5400 | |
5401 | return (int)count; |
5402 | } |
5403 | |
5404 | return failure ? limit : (int)count; |
5405 | } |
5406 | |
5407 | /** |
5408 | * stmmac_rx - manage the receive process |
5409 | * @priv: driver private structure |
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
5413 | * It gets all the frames inside the ring. |
5414 | */ |
5415 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
5416 | { |
5417 | u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; |
5418 | struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; |
5419 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
5420 | struct stmmac_channel *ch = &priv->channel[queue]; |
5421 | unsigned int count = 0, error = 0, len = 0; |
5422 | int status = 0, coe = priv->hw->rx_csum; |
5423 | unsigned int next_entry = rx_q->cur_rx; |
5424 | enum dma_data_direction dma_dir; |
5425 | unsigned int desc_size; |
5426 | struct sk_buff *skb = NULL; |
5427 | struct stmmac_xdp_buff ctx; |
5428 | int xdp_status = 0; |
5429 | int buf_sz; |
5430 | |
	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5432 | buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; |
5433 | limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); |
5434 | |
5435 | if (netif_msg_rx_status(priv)) { |
5436 | void *rx_head; |
5437 | |
		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5439 | if (priv->extend_desc) { |
5440 | rx_head = (void *)rx_q->dma_erx; |
5441 | desc_size = sizeof(struct dma_extended_desc); |
5442 | } else { |
5443 | rx_head = (void *)rx_q->dma_rx; |
5444 | desc_size = sizeof(struct dma_desc); |
5445 | } |
5446 | |
5447 | stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, |
5448 | rx_q->dma_rx_phy, desc_size); |
5449 | } |
5450 | while (count < limit) { |
5451 | unsigned int buf1_len = 0, buf2_len = 0; |
5452 | enum pkt_hash_types hash_type; |
5453 | struct stmmac_rx_buffer *buf; |
5454 | struct dma_desc *np, *p; |
5455 | int entry; |
5456 | u32 hash; |
5457 | |
5458 | if (!count && rx_q->state_saved) { |
5459 | skb = rx_q->state.skb; |
5460 | error = rx_q->state.error; |
5461 | len = rx_q->state.len; |
5462 | } else { |
5463 | rx_q->state_saved = false; |
5464 | skb = NULL; |
5465 | error = 0; |
5466 | len = 0; |
5467 | } |
5468 | |
5469 | read_again: |
5470 | if (count >= limit) |
5471 | break; |
5472 | |
5473 | buf1_len = 0; |
5474 | buf2_len = 0; |
5475 | entry = next_entry; |
5476 | buf = &rx_q->buf_pool[entry]; |
5477 | |
5478 | if (priv->extend_desc) |
5479 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
5480 | else |
5481 | p = rx_q->dma_rx + entry; |
5482 | |
5483 | /* read the status of the incoming frame */ |
5484 | status = stmmac_rx_status(priv, &priv->xstats, p); |
5485 | /* check if managed by the DMA otherwise go ahead */ |
5486 | if (unlikely(status & dma_own)) |
5487 | break; |
5488 | |
5489 | rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, |
5490 | priv->dma_conf.dma_rx_size); |
5491 | next_entry = rx_q->cur_rx; |
5492 | |
5493 | if (priv->extend_desc) |
5494 | np = (struct dma_desc *)(rx_q->dma_erx + next_entry); |
5495 | else |
5496 | np = rx_q->dma_rx + next_entry; |
5497 | |
5498 | prefetch(np); |
5499 | |
5500 | if (priv->extend_desc) |
5501 | stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); |
5502 | if (unlikely(status == discard_frame)) { |
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5504 | buf->page = NULL; |
5505 | error = 1; |
5506 | if (!priv->hwts_rx_en) |
5507 | rx_errors++; |
5508 | } |
5509 | |
5510 | if (unlikely(error && (status & rx_not_ls))) |
5511 | goto read_again; |
5512 | if (unlikely(error)) { |
5513 | dev_kfree_skb(skb); |
5514 | skb = NULL; |
5515 | count++; |
5516 | continue; |
5517 | } |
5518 | |
5519 | /* Buffer is good. Go on. */ |
5520 | |
5521 | prefetch(page_address(buf->page) + buf->page_offset); |
5522 | if (buf->sec_page) |
5523 | prefetch(page_address(buf->sec_page)); |
5524 | |
5525 | buf1_len = stmmac_rx_buf1_len(priv, p, status, len); |
5526 | len += buf1_len; |
5527 | buf2_len = stmmac_rx_buf2_len(priv, p, status, len); |
5528 | len += buf2_len; |
5529 | |
5530 | /* ACS is disabled; strip manually. */ |
5531 | if (likely(!(status & rx_not_ls))) { |
5532 | if (buf2_len) { |
5533 | buf2_len -= ETH_FCS_LEN; |
5534 | len -= ETH_FCS_LEN; |
5535 | } else if (buf1_len) { |
5536 | buf1_len -= ETH_FCS_LEN; |
5537 | len -= ETH_FCS_LEN; |
5538 | } |
5539 | } |
5540 | |
5541 | if (!skb) { |
5542 | unsigned int pre_len, sync_len; |
5543 | |
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);

			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
					 buf->page_offset, buf1_len, true);

			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
				  buf->page_offset;

			ctx.priv = priv;
			ctx.desc = p;
			ctx.ndesc = np;

			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
			/* Due to xdp_adjust_tail, the DMA sync for_device
			 * must cover the max length the CPU may have touched.
			 */
			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
				   buf->page_offset;
			sync_len = max(sync_len, pre_len);

			/* For any verdict other than XDP_PASS */
			if (IS_ERR(skb)) {
				unsigned int xdp_res = -PTR_ERR(skb);

				if (xdp_res & STMMAC_XDP_CONSUMED) {
					page_pool_put_page(rx_q->page_pool,
							   virt_to_head_page(ctx.xdp.data),
							   sync_len, true);
5574 | buf->page = NULL; |
5575 | rx_dropped++; |
5576 | |
5577 | /* Clear skb as it was set as |
5578 | * status by XDP program. |
5579 | */ |
5580 | skb = NULL; |
5581 | |
5582 | if (unlikely((status & rx_not_ls))) |
5583 | goto read_again; |
5584 | |
5585 | count++; |
5586 | continue; |
5587 | } else if (xdp_res & (STMMAC_XDP_TX | |
5588 | STMMAC_XDP_REDIRECT)) { |
5589 | xdp_status |= xdp_res; |
5590 | buf->page = NULL; |
5591 | skb = NULL; |
5592 | count++; |
5593 | continue; |
5594 | } |
5595 | } |
5596 | } |
5597 | |
5598 | if (!skb) { |
5599 | /* XDP program may expand or reduce tail */ |
5600 | buf1_len = ctx.xdp.data_end - ctx.xdp.data; |
5601 | |
			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				rx_dropped++;
				count++;
				goto drain_data;
			}

			/* XDP program may adjust header */
			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, buf->page_offset, buf1_len,
					priv->dma_conf.dma_buf_sz);

			/* Data payload appended into SKB */
			skb_mark_for_recycle(skb);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_conf.dma_buf_sz);
5634 | |
5635 | /* Data payload appended into SKB */ |
5636 | skb_mark_for_recycle(skb); |
5637 | buf->sec_page = NULL; |
5638 | } |
5639 | |
5640 | drain_data: |
5641 | if (likely(status & rx_not_ls)) |
5642 | goto read_again; |
5643 | if (!skb) |
5644 | continue; |
5645 | |
5646 | /* Got entire packet into SKB. Finish it. */ |
5647 | |
5648 | stmmac_get_rx_hwtstamp(priv, p, np, skb); |
5649 | |
5650 | if (priv->hw->hw_vlan_en) |
5651 | /* MAC level stripping. */ |
5652 | stmmac_rx_hw_vlan(priv, priv->hw, p, skb); |
5653 | else |
5654 | /* Driver level stripping. */ |
			stmmac_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
5669 | skb = NULL; |
5670 | |
5671 | rx_packets++; |
5672 | rx_bytes += len; |
5673 | count++; |
5674 | } |
5675 | |
5676 | if (status & rx_not_ls || skb) { |
5677 | rx_q->state_saved = true; |
5678 | rx_q->state.skb = skb; |
5679 | rx_q->state.error = error; |
5680 | rx_q->state.len = len; |
5681 | } |
5682 | |
5683 | stmmac_finalize_xdp_rx(priv, xdp_status); |
5684 | |
5685 | stmmac_rx_refill(priv, queue); |
5686 | |
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);
5692 | |
5693 | priv->xstats.rx_dropped += rx_dropped; |
5694 | priv->xstats.rx_errors += rx_errors; |
5695 | |
5696 | return count; |
5697 | } |
5698 | |
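/* NAPI poll callback for an RX-only channel: clean the RX ring and
 * re-enable the RX DMA interrupt once the budget is no longer exhausted.
 */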
5699 | static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) |
5700 | { |
5701 | struct stmmac_channel *ch = |
5702 | container_of(napi, struct stmmac_channel, rx_napi); |
5703 | struct stmmac_priv *priv = ch->priv_data; |
5704 | struct stmmac_rxq_stats *rxq_stats; |
5705 | u32 chan = ch->index; |
5706 | int work_done; |
5707 | |
5708 | rxq_stats = &priv->xstats.rxq_stats[chan]; |
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
5720 | } |
5721 | |
5722 | return work_done; |
5723 | } |
5724 | |
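/* NAPI poll callback for a TX-only channel: clean the TX ring, re-enable
 * the TX DMA interrupt when done and re-arm the TX coalescing timer if
 * packets are still pending.
 */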
5725 | static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) |
5726 | { |
5727 | struct stmmac_channel *ch = |
5728 | container_of(napi, struct stmmac_channel, tx_napi); |
5729 | struct stmmac_priv *priv = ch->priv_data; |
5730 | struct stmmac_txq_stats *txq_stats; |
5731 | bool pending_packets = false; |
5732 | u32 chan = ch->index; |
5733 | int work_done; |
5734 | |
5735 | txq_stats = &priv->xstats.txq_stats[chan]; |
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle; check if we need to arm the TX timer */
	if (pending_packets)
		stmmac_tx_timer_arm(priv, chan);
5754 | |
5755 | return work_done; |
5756 | } |
5757 | |
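/* NAPI poll callback for a combined RX/TX (XDP zero-copy) channel. Note
 * the min() on return: NAPI requires a value strictly below the budget
 * once napi_complete_done() has been called.
 */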
5758 | static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) |
5759 | { |
5760 | struct stmmac_channel *ch = |
5761 | container_of(napi, struct stmmac_channel, rxtx_napi); |
5762 | struct stmmac_priv *priv = ch->priv_data; |
5763 | bool tx_pending_packets = false; |
5764 | int rx_done, tx_done, rxtx_done; |
5765 | struct stmmac_rxq_stats *rxq_stats; |
5766 | struct stmmac_txq_stats *txq_stats; |
5767 | u32 chan = ch->index; |
5768 | |
	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still has packets to handle; check if we need to arm the TX timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);
5807 | |
5808 | return min(rxtx_done, budget - 1); |
5809 | } |
5810 | |
5811 | /** |
5812 | * stmmac_tx_timeout |
5813 | * @dev : Pointer to net device structure |
5814 | * @txqueue: the index of the hanging transmit queue |
5815 | * Description: this function is called when a packet transmission fails to |
5816 | * complete within a reasonable time. The driver will mark the error in the |
5817 | * netdev structure and arrange for the device to be reset to a sane state |
5818 | * in order to transmit a new packet. |
5819 | */ |
5820 | static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) |
5821 | { |
5822 | struct stmmac_priv *priv = netdev_priv(dev); |
5823 | |
5824 | stmmac_global_err(priv); |
5825 | } |
5826 | |
5827 | /** |
5828 | * stmmac_set_rx_mode - entry point for multicast addressing |
5829 | * @dev : pointer to the device structure |
5830 | * Description: |
5831 | * This function is a driver entry point which gets called by the kernel |
5832 | * whenever multicast addresses must be enabled/disabled. |
5833 | * Return value: |
5834 | * void. |
5835 | */ |
5836 | static void stmmac_set_rx_mode(struct net_device *dev) |
5837 | { |
5838 | struct stmmac_priv *priv = netdev_priv(dev); |
5839 | |
5840 | stmmac_set_filter(priv, priv->hw, dev); |
5841 | } |
5842 | |
5843 | /** |
5844 | * stmmac_change_mtu - entry point to change MTU size for the device. |
5845 | * @dev : device pointer. |
5846 | * @new_mtu : the new MTU size for the device. |
5847 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer |
5848 | * to drive packet transmission. Ethernet has an MTU of 1500 octets |
5849 | * (ETH_DATA_LEN). This value can be changed with ifconfig. |
5850 | * Return value: |
5851 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
5852 | * file on failure. |
5853 | */ |
5854 | static int stmmac_change_mtu(struct net_device *dev, int new_mtu) |
5855 | { |
5856 | struct stmmac_priv *priv = netdev_priv(dev); |
5857 | int txfifosz = priv->plat->tx_fifo_size; |
5858 | struct stmmac_dma_conf *dma_conf; |
5859 | const int mtu = new_mtu; |
5860 | int ret; |
5861 | |
5862 | if (txfifosz == 0) |
5863 | txfifosz = priv->dma_cap.tx_fifo_size; |
5864 | |
5865 | txfifosz /= priv->plat->tx_queues_to_use; |
5866 | |
5867 | if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { |
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5869 | return -EINVAL; |
5870 | } |
5871 | |
5872 | new_mtu = STMMAC_ALIGN(new_mtu); |
5873 | |
5874 | /* If condition true, FIFO is too small or MTU too large */ |
5875 | if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) |
5876 | return -EINVAL; |
5877 | |
5878 | if (netif_running(dev)) { |
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		kfree(dma_conf);
5899 | |
5900 | stmmac_set_rx_mode(dev); |
5901 | } |
5902 | |
5903 | dev->mtu = mtu; |
5904 | netdev_update_features(dev); |
5905 | |
5906 | return 0; |
5907 | } |
5908 | |
5909 | static netdev_features_t stmmac_fix_features(struct net_device *dev, |
5910 | netdev_features_t features) |
5911 | { |
5912 | struct stmmac_priv *priv = netdev_priv(dev); |
5913 | |
5914 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) |
5915 | features &= ~NETIF_F_RXCSUM; |
5916 | |
5917 | if (!priv->plat->tx_coe) |
5918 | features &= ~NETIF_F_CSUM_MASK; |
5919 | |
5920 | /* Some GMAC devices have a bugged Jumbo frame support that |
5921 | * needs to have the Tx COE disabled for oversized frames |
5922 | * (due to limited buffer sizes). In this case we disable |
5923 | * the TX csum insertion in the TDES and not use SF. |
5924 | */ |
5925 | if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) |
5926 | features &= ~NETIF_F_CSUM_MASK; |
5927 | |
5928 | /* Disable tso if asked by ethtool */ |
5929 | if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { |
5930 | if (features & NETIF_F_TSO) |
5931 | priv->tso = true; |
5932 | else |
5933 | priv->tso = false; |
5934 | } |
5935 | |
5936 | return features; |
5937 | } |
5938 | |
5939 | static int stmmac_set_features(struct net_device *netdev, |
5940 | netdev_features_t features) |
5941 | { |
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE type if RX checksum offload is supported */
5945 | if (features & NETIF_F_RXCSUM) |
5946 | priv->hw->rx_csum = priv->plat->rx_coe; |
5947 | else |
5948 | priv->hw->rx_csum = 0; |
5949 | /* No check needed because rx_coe has been set before and it will be |
5950 | * fixed in case of issue. |
5951 | */ |
5952 | stmmac_rx_ipc(priv, priv->hw); |
5953 | |
5954 | if (priv->sph_cap) { |
5955 | bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; |
5956 | u32 chan; |
5957 | |
5958 | for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) |
5959 | stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); |
5960 | } |
5961 | |
5962 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
5963 | priv->hw->hw_vlan_en = true; |
5964 | else |
5965 | priv->hw->hw_vlan_en = false; |
5966 | |
5967 | stmmac_set_hw_vlan_mode(priv, priv->hw); |
5968 | |
5969 | return 0; |
5970 | } |
5971 | |
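/* Track the Frame Preemption (FPE) handshake state machine based on the
 * mPacket events reported by the hardware, and schedule the FPE workqueue
 * when a state transition needs follow-up.
 */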
5972 | static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) |
5973 | { |
5974 | struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; |
5975 | enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; |
5976 | enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; |
5977 | bool *hs_enable = &fpe_cfg->hs_enable; |
5978 | |
5979 | if (status == FPE_EVENT_UNKNOWN || !*hs_enable) |
5980 | return; |
5981 | |
5982 | /* If LP has sent verify mPacket, LP is FPE capable */ |
5983 | if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { |
5984 | if (*lp_state < FPE_STATE_CAPABLE) |
5985 | *lp_state = FPE_STATE_CAPABLE; |
5986 | |
		/* If user has requested FPE enable, respond quickly */
5988 | if (*hs_enable) |
5989 | stmmac_fpe_send_mpacket(priv, priv->ioaddr, |
5990 | fpe_cfg, |
5991 | MPACKET_RESPONSE); |
5992 | } |
5993 | |
5994 | /* If Local has sent verify mPacket, Local is FPE capable */ |
5995 | if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { |
5996 | if (*lo_state < FPE_STATE_CAPABLE) |
5997 | *lo_state = FPE_STATE_CAPABLE; |
5998 | } |
5999 | |
6000 | /* If LP has sent response mPacket, LP is entering FPE ON */ |
6001 | if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) |
6002 | *lp_state = FPE_STATE_ENTERING_ON; |
6003 | |
6004 | /* If Local has sent response mPacket, Local is entering FPE ON */ |
6005 | if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) |
6006 | *lo_state = FPE_STATE_ENTERING_ON; |
6007 | |
6008 | if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && |
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
6012 | } |
6013 | } |
6014 | |
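/* Handle the interrupt sources shared by all MAC variants: wake-up events,
 * EST and FPE status, LPI transitions, per-queue MTL events and PCS link
 * changes.
 */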
6015 | static void stmmac_common_interrupt(struct stmmac_priv *priv) |
6016 | { |
6017 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
6018 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
6019 | u32 queues_count; |
6020 | u32 queue; |
6021 | bool xmac; |
6022 | |
6023 | xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; |
6024 | queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; |
6025 | |
6026 | if (priv->irq_wake) |
		pm_wakeup_event(priv->device, 0);
6028 | |
6029 | if (priv->dma_cap.estsel) |
6030 | stmmac_est_irq_status(priv, priv, priv->dev, |
6031 | &priv->xstats, tx_cnt); |
6032 | |
6033 | if (priv->dma_cap.fpesel) { |
6034 | int status = stmmac_fpe_irq_status(priv, priv->ioaddr, |
6035 | priv->dev); |
6036 | |
6037 | stmmac_fpe_event_status(priv, status); |
6038 | } |
6039 | |
6040 | /* To handle GMAC own interrupts */ |
6041 | if ((priv->plat->has_gmac) || xmac) { |
6042 | int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); |
6043 | |
6044 | if (unlikely(status)) { |
6045 | /* For LPI we need to save the tx status */ |
6046 | if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) |
6047 | priv->tx_path_in_lpi_mode = true; |
6048 | if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) |
6049 | priv->tx_path_in_lpi_mode = false; |
6050 | } |
6051 | |
6052 | for (queue = 0; queue < queues_count; queue++) |
6053 | stmmac_host_mtl_irq_status(priv, priv->hw, queue); |
6054 | |
6055 | /* PCS link status */ |
6056 | if (priv->hw->pcs && |
6057 | !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { |
6058 | if (priv->xstats.pcs_link) |
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
6062 | } |
6063 | |
6064 | stmmac_timestamp_interrupt(priv, priv); |
6065 | } |
6066 | } |
6067 | |
6068 | /** |
6069 | * stmmac_interrupt - main ISR |
6070 | * @irq: interrupt number. |
6071 | * @dev_id: to pass the net device pointer. |
6072 | * Description: this is the main driver interrupt service routine. |
6073 | * It can call: |
6074 | * o DMA service routine (to manage incoming frame reception and transmission |
6075 | * status) |
6076 | * o Core interrupts to manage: remote wake-up, management counter, LPI |
6077 | * interrupts. |
6078 | */ |
6079 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id) |
6080 | { |
6081 | struct net_device *dev = (struct net_device *)dev_id; |
6082 | struct stmmac_priv *priv = netdev_priv(dev); |
6083 | |
6084 | /* Check if adapter is up */ |
6085 | if (test_bit(STMMAC_DOWN, &priv->state)) |
6086 | return IRQ_HANDLED; |
6087 | |
6088 | /* Check ASP error if it isn't delivered via an individual IRQ */ |
6089 | if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) |
6090 | return IRQ_HANDLED; |
6091 | |
6092 | /* To handle Common interrupts */ |
6093 | stmmac_common_interrupt(priv); |
6094 | |
6095 | /* To handle DMA interrupts */ |
6096 | stmmac_dma_interrupt(priv); |
6097 | |
6098 | return IRQ_HANDLED; |
6099 | } |
6100 | |
6101 | static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) |
6102 | { |
6103 | struct net_device *dev = (struct net_device *)dev_id; |
6104 | struct stmmac_priv *priv = netdev_priv(dev); |
6105 | |
6106 | /* Check if adapter is up */ |
6107 | if (test_bit(STMMAC_DOWN, &priv->state)) |
6108 | return IRQ_HANDLED; |
6109 | |
6110 | /* To handle Common interrupts */ |
6111 | stmmac_common_interrupt(priv); |
6112 | |
6113 | return IRQ_HANDLED; |
6114 | } |
6115 | |
6116 | static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) |
6117 | { |
6118 | struct net_device *dev = (struct net_device *)dev_id; |
6119 | struct stmmac_priv *priv = netdev_priv(dev); |
6120 | |
6121 | /* Check if adapter is up */ |
6122 | if (test_bit(STMMAC_DOWN, &priv->state)) |
6123 | return IRQ_HANDLED; |
6124 | |
6125 | /* Check if a fatal error happened */ |
6126 | stmmac_safety_feat_interrupt(priv); |
6127 | |
6128 | return IRQ_HANDLED; |
6129 | } |
6130 | |
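/* Per-queue TX MSI handler: schedule NAPI for the channel and react to TX
 * DMA errors by bumping the threshold or restarting the channel.
 */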
6131 | static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) |
6132 | { |
6133 | struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; |
6134 | struct stmmac_dma_conf *dma_conf; |
6135 | int chan = tx_q->queue_index; |
6136 | struct stmmac_priv *priv; |
6137 | int status; |
6138 | |
6139 | dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); |
6140 | priv = container_of(dma_conf, struct stmmac_priv, dma_conf); |
6141 | |
6142 | /* Check if adapter is up */ |
6143 | if (test_bit(STMMAC_DOWN, &priv->state)) |
6144 | return IRQ_HANDLED; |
6145 | |
	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6147 | |
6148 | if (unlikely(status & tx_hard_error_bump_tc)) { |
6149 | /* Try to bump up the dma threshold on this failure */ |
6150 | stmmac_bump_dma_threshold(priv, chan); |
6151 | } else if (unlikely(status == tx_hard_error)) { |
6152 | stmmac_tx_err(priv, chan); |
6153 | } |
6154 | |
6155 | return IRQ_HANDLED; |
6156 | } |
6157 | |
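/* Per-queue RX MSI handler: only schedules NAPI for the channel. */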
6158 | static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) |
6159 | { |
6160 | struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; |
6161 | struct stmmac_dma_conf *dma_conf; |
6162 | int chan = rx_q->queue_index; |
6163 | struct stmmac_priv *priv; |
6164 | |
6165 | dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); |
6166 | priv = container_of(dma_conf, struct stmmac_priv, dma_conf); |
6167 | |
6168 | /* Check if adapter is up */ |
6169 | if (test_bit(STMMAC_DOWN, &priv->state)) |
6170 | return IRQ_HANDLED; |
6171 | |
	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6173 | |
6174 | return IRQ_HANDLED; |
6175 | } |
6176 | |
6177 | /** |
6178 | * stmmac_ioctl - Entry point for the Ioctl |
6179 | * @dev: Device pointer. |
 * @rq: An IOCTL specific structure, that can contain a pointer to
6181 | * a proprietary structure used to pass information to the driver. |
6182 | * @cmd: IOCTL command |
6183 | * Description: |
6184 | * Currently it supports the phy_mii_ioctl(...) and HW time stamping. |
6185 | */ |
6186 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
6187 | { |
	struct stmmac_priv *priv = netdev_priv(dev);
6189 | int ret = -EOPNOTSUPP; |
6190 | |
6191 | if (!netif_running(dev)) |
6192 | return -EINVAL; |
6193 | |
6194 | switch (cmd) { |
6195 | case SIOCGMIIPHY: |
6196 | case SIOCGMIIREG: |
6197 | case SIOCSMIIREG: |
6198 | ret = phylink_mii_ioctl(priv->phylink, rq, cmd); |
6199 | break; |
6200 | case SIOCSHWTSTAMP: |
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
6205 | break; |
6206 | default: |
6207 | break; |
6208 | } |
6209 | |
6210 | return ret; |
6211 | } |
6212 | |
6213 | static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
6214 | void *cb_priv) |
6215 | { |
6216 | struct stmmac_priv *priv = cb_priv; |
6217 | int ret = -EOPNOTSUPP; |
6218 | |
	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6220 | return ret; |
6221 | |
6222 | __stmmac_disable_all_queues(priv); |
6223 | |
6224 | switch (type) { |
6225 | case TC_SETUP_CLSU32: |
6226 | ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); |
6227 | break; |
6228 | case TC_SETUP_CLSFLOWER: |
6229 | ret = stmmac_tc_setup_cls(priv, priv, type_data); |
6230 | break; |
6231 | default: |
6232 | break; |
6233 | } |
6234 | |
6235 | stmmac_enable_all_queues(priv); |
6236 | return ret; |
6237 | } |
6238 | |
6239 | static LIST_HEAD(stmmac_block_cb_list); |
6240 | |
6241 | static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, |
6242 | void *type_data) |
6243 | { |
	struct stmmac_priv *priv = netdev_priv(ndev);
6245 | |
6246 | switch (type) { |
6247 | case TC_QUERY_CAPS: |
6248 | return stmmac_tc_query_caps(priv, priv, type_data); |
6249 | case TC_SETUP_BLOCK: |
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
6254 | case TC_SETUP_QDISC_CBS: |
6255 | return stmmac_tc_setup_cbs(priv, priv, type_data); |
6256 | case TC_SETUP_QDISC_TAPRIO: |
6257 | return stmmac_tc_setup_taprio(priv, priv, type_data); |
6258 | case TC_SETUP_QDISC_ETF: |
6259 | return stmmac_tc_setup_etf(priv, priv, type_data); |
6260 | default: |
6261 | return -EOPNOTSUPP; |
6262 | } |
6263 | } |
6264 | |
6265 | static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, |
6266 | struct net_device *sb_dev) |
6267 | { |
6268 | int gso = skb_shinfo(skb)->gso_type; |
6269 | |
6270 | if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { |
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's always use Queue 0 because if
		 * TSO/USO is supported then at least this one will be
		 * capable.
		 */
6277 | return 0; |
6278 | } |
6279 | |
6280 | return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; |
6281 | } |
6282 | |
6283 | static int stmmac_set_mac_address(struct net_device *ndev, void *addr) |
6284 | { |
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);
6300 | |
6301 | return ret; |
6302 | } |
6303 | |
6304 | #ifdef CONFIG_DEBUG_FS |
6305 | static struct dentry *stmmac_fs_dir; |
6306 | |
6307 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
6308 | struct seq_file *seq, dma_addr_t dma_phy_addr) |
6309 | { |
6310 | struct dma_extended_desc *ep = (struct dma_extended_desc *)head; |
6311 | struct dma_desc *p = (struct dma_desc *)head; |
6312 | unsigned int desc_size; |
6313 | dma_addr_t dma_addr; |
6314 | int i; |
6315 | |
6316 | desc_size = extend_desc ? sizeof(*ep) : sizeof(*p); |
6317 | for (i = 0; i < size; i++) { |
6318 | dma_addr = dma_phy_addr + i * desc_size; |
		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
			   i, &dma_addr,
			   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6323 | if (extend_desc) |
6324 | p = &(++ep)->basic; |
6325 | else |
6326 | p++; |
6327 | } |
6328 | } |
6329 | |
6330 | static int stmmac_rings_status_show(struct seq_file *seq, void *v) |
6331 | { |
6332 | struct net_device *dev = seq->private; |
6333 | struct stmmac_priv *priv = netdev_priv(dev); |
6334 | u32 rx_count = priv->plat->rx_queues_to_use; |
6335 | u32 tx_count = priv->plat->tx_queues_to_use; |
6336 | u32 queue; |
6337 | |
6338 | if ((dev->flags & IFF_UP) == 0) |
6339 | return 0; |
6340 | |
6341 | for (queue = 0; queue < rx_count; queue++) { |
6342 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
6343 | |
		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6370 | } |
6371 | } |
6372 | |
6373 | return 0; |
6374 | } |
6375 | DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); |
6376 | |
6377 | static int stmmac_dma_cap_show(struct seq_file *seq, void *v) |
6378 | { |
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
6395 | struct net_device *dev = seq->private; |
6396 | struct stmmac_priv *priv = netdev_priv(dev); |
6397 | |
6398 | if (!priv->hw_cap_support) { |
seq_printf(seq, "DMA HW features not supported\n");
6400 | return 0; |
6401 | } |
6402 | |
seq_printf(seq, "==============================\n");
seq_printf(seq, "\tDMA HW features\n");
seq_printf(seq, "==============================\n");

seq_printf(seq, "\t10/100 Mbps: %s\n",
(priv->dma_cap.mbps_10_100) ? "Y" : "N");
seq_printf(seq, "\t1000 Mbps: %s\n",
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
if (priv->plat->has_xgmac) {
seq_printf(seq,
"\tNumber of Additional MAC address registers: %d\n",
priv->dma_cap.multi_addr);
} else {
seq_printf(seq, "\tHash Filter: %s\n",
(priv->dma_cap.hash_filter) ? "Y" : "N");
seq_printf(seq, "\tMultiple MAC address registers: %s\n",
(priv->dma_cap.multi_addr) ? "Y" : "N");
}
seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
(priv->dma_cap.sma_mdio) ? "Y" : "N");
seq_printf(seq, "\tPMT Remote wake up: %s\n",
(priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
seq_printf(seq, "\tPMT Magic Frame: %s\n",
(priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
seq_printf(seq, "\tRMON module: %s\n",
(priv->dma_cap.rmon) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
if (priv->plat->has_xgmac)
seq_printf(seq, "\tTimestamp System Time Source: %s\n",
dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
priv->plat->has_xgmac) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
(priv->dma_cap.rx_coe_type1) ? "Y" : "N");
seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
(priv->dma_cap.rx_coe_type2) ? "Y" : "N");
seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
(priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
}
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
priv->dma_cap.number_tx_channel);
seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
priv->dma_cap.number_rx_queues);
seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
priv->dma_cap.number_tx_queues);
seq_printf(seq, "\tEnhanced descriptors: %s\n",
(priv->dma_cap.enh_desc) ? "Y" : "N");
seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
(BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
priv->dma_cap.pps_out_num);
seq_printf(seq, "\tSafety Features: %s\n",
dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
priv->dma_cap.host_dma_width);
seq_printf(seq, "\tReceive Side Scaling: %s\n",
priv->dma_cap.rssen ? "Y" : "N");
seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
priv->dma_cap.vlhash ? "Y" : "N");
seq_printf(seq, "\tSplit Header: %s\n",
priv->dma_cap.sphen ? "Y" : "N");
seq_printf(seq, "\tVLAN TX Insertion: %s\n",
priv->dma_cap.vlins ? "Y" : "N");
seq_printf(seq, "\tDouble VLAN: %s\n",
priv->dma_cap.dvlan ? "Y" : "N");
seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
priv->dma_cap.l3l4fnum);
seq_printf(seq, "\tARP Offloading: %s\n",
priv->dma_cap.arpoffsel ? "Y" : "N");
seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
priv->dma_cap.estsel ? "Y" : "N");
seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
priv->dma_cap.fpesel ? "Y" : "N");
seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
priv->dma_cap.tbssel ? "Y" : "N");
seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
priv->dma_cap.tbs_ch_num);
seq_printf(seq, "\tPer-Stream Filtering: %s\n",
priv->dma_cap.sgfsel ? "Y" : "N");
seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
BIT(priv->dma_cap.ttsfd) >> 1);
seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
priv->dma_cap.numtc);
seq_printf(seq, "\tDCB Feature: %s\n",
priv->dma_cap.dcben ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
priv->dma_cap.advthword ? "Y" : "N");
seq_printf(seq, "\tPTP Offload: %s\n",
priv->dma_cap.ptoen ? "Y" : "N");
seq_printf(seq, "\tOne-Step Timestamping: %s\n",
priv->dma_cap.osten ? "Y" : "N");
seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
priv->dma_cap.pfcen ? "Y" : "N");
seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
BIT(priv->dma_cap.frpes) << 6);
seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
BIT(priv->dma_cap.frpbs) << 6);
seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
priv->dma_cap.frppipe_num);
seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
priv->dma_cap.nrvf_num ?
(BIT(priv->dma_cap.nrvf_num) << 1) : 0);
seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
seq_printf(seq, "\tDepth of GCL: %lu\n",
priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
priv->dma_cap.cbtisel ? "Y" : "N");
seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
priv->dma_cap.aux_snapshot_n);
seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
priv->dma_cap.pou_ost_en ? "Y" : "N");
seq_printf(seq, "\tEnhanced DMA: %s\n",
priv->dma_cap.edma ? "Y" : "N");
seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
priv->dma_cap.ediffc ? "Y" : "N");
seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
priv->dma_cap.vxn ? "Y" : "N");
seq_printf(seq, "\tDebug Memory Interface: %s\n",
priv->dma_cap.dbgmem ? "Y" : "N");
seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6547 | return 0; |
6548 | } |
6549 | DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); |
6550 | |
6551 | /* Use network device events to rename debugfs file entries. |
6552 | */ |
6553 | static int stmmac_device_event(struct notifier_block *unused, |
6554 | unsigned long event, void *ptr) |
6555 | { |
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6557 | struct stmmac_priv *priv = netdev_priv(dev); |
6558 | |
6559 | if (dev->netdev_ops != &stmmac_netdev_ops) |
6560 | goto done; |
6561 | |
6562 | switch (event) { |
6563 | case NETDEV_CHANGENAME: |
6564 | if (priv->dbgfs_dir) |
priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
priv->dbgfs_dir,
stmmac_fs_dir,
dev->name);
6569 | break; |
6570 | } |
6571 | done: |
6572 | return NOTIFY_DONE; |
6573 | } |
6574 | |
6575 | static struct notifier_block stmmac_notifier = { |
6576 | .notifier_call = stmmac_device_event, |
6577 | }; |
6578 | |
6579 | static void stmmac_init_fs(struct net_device *dev) |
6580 | { |
6581 | struct stmmac_priv *priv = netdev_priv(dev); |
6582 | |
6583 | rtnl_lock(); |
6584 | |
6585 | /* Create per netdev entries */ |
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6587 | |
6588 | /* Entry to report DMA RX/TX rings */ |
debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
&stmmac_rings_status_fops);

/* Entry to report the DMA HW features */
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
6595 | |
6596 | rtnl_unlock(); |
6597 | } |
6598 | |
6599 | static void stmmac_exit_fs(struct net_device *dev) |
6600 | { |
6601 | struct stmmac_priv *priv = netdev_priv(dev); |
6602 | |
debugfs_remove_recursive(priv->dbgfs_dir);
6604 | } |
6605 | #endif /* CONFIG_DEBUG_FS */ |
6606 | |
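/* Compute a CRC-32 (polynomial 0xedb88320, LSB first) over the 12 valid VID
 * bits of the little-endian VLAN ID; the caller derives the 4-bit VLAN
 * hash-filter index from this value.
 */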
6607 | static u32 stmmac_vid_crc32_le(__le16 vid_le) |
6608 | { |
6609 | unsigned char *data = (unsigned char *)&vid_le; |
6610 | unsigned char data_byte = 0; |
6611 | u32 crc = ~0x0; |
6612 | u32 temp = 0; |
6613 | int i, bits; |
6614 | |
6615 | bits = get_bitmask_order(VLAN_VID_MASK); |
6616 | for (i = 0; i < bits; i++) { |
6617 | if ((i % 8) == 0) |
6618 | data_byte = data[i / 8]; |
6619 | |
6620 | temp = ((crc & 1) ^ data_byte) & 1; |
6621 | crc >>= 1; |
6622 | data_byte >>= 1; |
6623 | |
6624 | if (temp) |
6625 | crc ^= 0xedb88320; |
6626 | } |
6627 | |
6628 | return crc; |
6629 | } |
6630 | |
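/* Recompute the 16-bit VLAN hash filter from the set of active VIDs and
 * program it into the MAC. Without the VLAN hash feature, fall back to the
 * single perfect-match register, which covers only one VID besides VID 0.
 */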
6631 | static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) |
6632 | { |
6633 | u32 crc, hash = 0; |
6634 | __le16 pmatch = 0; |
6635 | int count = 0; |
6636 | u16 vid = 0; |
6637 | |
6638 | for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { |
6639 | __le16 vid_le = cpu_to_le16(vid); |
6640 | crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; |
6641 | hash |= (1 << crc); |
6642 | count++; |
6643 | } |
6644 | |
6645 | if (!priv->dma_cap.vlhash) { |
6646 | if (count > 2) /* VID = 0 always passes filter */ |
6647 | return -EOPNOTSUPP; |
6648 | |
6649 | pmatch = cpu_to_le16(vid); |
6650 | hash = 0; |
6651 | } |
6652 | |
6653 | return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); |
6654 | } |
6655 | |
6656 | static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) |
6657 | { |
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
int ret;

ret = pm_runtime_resume_and_get(priv->device);
6663 | if (ret < 0) |
6664 | return ret; |
6665 | |
6666 | if (be16_to_cpu(proto) == ETH_P_8021AD) |
6667 | is_double = true; |
6668 | |
set_bit(vid, priv->active_vlans);
ret = stmmac_vlan_update(priv, is_double);
if (ret) {
clear_bit(vid, priv->active_vlans);
6673 | goto err_pm_put; |
6674 | } |
6675 | |
6676 | if (priv->hw->num_vlan) { |
6677 | ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
6678 | if (ret) |
6679 | goto err_pm_put; |
6680 | } |
6681 | err_pm_put: |
pm_runtime_put(priv->device);
6683 | |
6684 | return ret; |
6685 | } |
6686 | |
6687 | static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) |
6688 | { |
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
int ret;

ret = pm_runtime_resume_and_get(priv->device);
6694 | if (ret < 0) |
6695 | return ret; |
6696 | |
6697 | if (be16_to_cpu(proto) == ETH_P_8021AD) |
6698 | is_double = true; |
6699 | |
clear_bit(vid, priv->active_vlans);
6701 | |
6702 | if (priv->hw->num_vlan) { |
6703 | ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); |
6704 | if (ret) |
6705 | goto del_vlan_error; |
6706 | } |
6707 | |
6708 | ret = stmmac_vlan_update(priv, is_double); |
6709 | |
6710 | del_vlan_error: |
pm_runtime_put(priv->device);
6712 | |
6713 | return ret; |
6714 | } |
6715 | |
6716 | static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) |
6717 | { |
6718 | struct stmmac_priv *priv = netdev_priv(dev); |
6719 | |
6720 | switch (bpf->command) { |
6721 | case XDP_SETUP_PROG: |
return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
case XDP_SETUP_XSK_POOL:
return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
bpf->xsk.queue_id);
6726 | default: |
6727 | return -EOPNOTSUPP; |
6728 | } |
6729 | } |
6730 | |
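/* .ndo_xdp_xmit handler: transmit up to @num_frames XDP frames on the TX
 * queue mapped to the current CPU, sharing the queue lock with the slow
 * path. Returns the number of frames actually queued.
 */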
6731 | static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, |
6732 | struct xdp_frame **frames, u32 flags) |
6733 | { |
6734 | struct stmmac_priv *priv = netdev_priv(dev); |
6735 | int cpu = smp_processor_id(); |
6736 | struct netdev_queue *nq; |
6737 | int i, nxmit = 0; |
6738 | int queue; |
6739 | |
6740 | if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) |
6741 | return -ENETDOWN; |
6742 | |
6743 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
6744 | return -EINVAL; |
6745 | |
6746 | queue = stmmac_xdp_get_tx_queue(priv, cpu); |
nq = netdev_get_tx_queue(priv->dev, queue);

__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
6752 | |
6753 | for (i = 0; i < num_frames; i++) { |
6754 | int res; |
6755 | |
res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6757 | if (res == STMMAC_XDP_CONSUMED) |
6758 | break; |
6759 | |
6760 | nxmit++; |
6761 | } |
6762 | |
6763 | if (flags & XDP_XMIT_FLUSH) { |
6764 | stmmac_flush_tx_descriptors(priv, queue); |
6765 | stmmac_tx_timer_arm(priv, queue); |
6766 | } |
6767 | |
__netif_tx_unlock(nq);
6769 | |
6770 | return nxmit; |
6771 | } |
6772 | |
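/* Quiesce one RX queue: mask its DMA interrupt, stop the RX DMA channel and
 * free its descriptor resources, e.g. while an XSK pool is being attached
 * or detached at runtime.
 */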
6773 | void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) |
6774 | { |
6775 | struct stmmac_channel *ch = &priv->channel[queue]; |
6776 | unsigned long flags; |
6777 | |
6778 | spin_lock_irqsave(&ch->lock, flags); |
6779 | stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); |
spin_unlock_irqrestore(&ch->lock, flags);

stmmac_stop_rx_dma(priv, queue);
__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6784 | } |
6785 | |
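/* Counterpart of stmmac_disable_rx_queue(): reallocate and reinitialize the
 * descriptor ring, reprogram the DMA channel (tail pointer and buffer size,
 * honouring an attached XSK pool), restart RX DMA and unmask the interrupt.
 */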
6786 | void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) |
6787 | { |
6788 | struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; |
6789 | struct stmmac_channel *ch = &priv->channel[queue]; |
6790 | unsigned long flags; |
6791 | u32 buf_size; |
6792 | int ret; |
6793 | |
ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}

ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
6804 | return; |
6805 | } |
6806 | |
6807 | stmmac_reset_rx_queue(priv, queue); |
stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6809 | |
6810 | stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
6811 | rx_q->dma_rx_phy, rx_q->queue_index); |
6812 | |
6813 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * |
6814 | sizeof(struct dma_desc)); |
6815 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, |
6816 | rx_q->rx_tail_addr, rx_q->queue_index); |
6817 | |
6818 | if (rx_q->xsk_pool && rx_q->buf_alloc_num) { |
buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6820 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
6821 | buf_size, |
6822 | rx_q->queue_index); |
6823 | } else { |
6824 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
6825 | priv->dma_conf.dma_buf_sz, |
6826 | rx_q->queue_index); |
6827 | } |
6828 | |
stmmac_start_rx_dma(priv, queue);

spin_lock_irqsave(&ch->lock, flags);
stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
6834 | } |
6835 | |
6836 | void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) |
6837 | { |
6838 | struct stmmac_channel *ch = &priv->channel[queue]; |
6839 | unsigned long flags; |
6840 | |
6841 | spin_lock_irqsave(&ch->lock, flags); |
6842 | stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); |
spin_unlock_irqrestore(&ch->lock, flags);

stmmac_stop_tx_dma(priv, queue);
__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6847 | } |
6848 | |
6849 | void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) |
6850 | { |
6851 | struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; |
6852 | struct stmmac_channel *ch = &priv->channel[queue]; |
6853 | unsigned long flags; |
6854 | int ret; |
6855 | |
ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}

ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}

stmmac_reset_tx_queue(priv, queue);
stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6871 | |
6872 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
6873 | tx_q->dma_tx_phy, tx_q->queue_index); |
6874 | |
6875 | if (tx_q->tbs & STMMAC_TBS_AVAIL) |
6876 | stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); |
6877 | |
6878 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
6879 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
6880 | tx_q->tx_tail_addr, tx_q->queue_index); |
6881 | |
stmmac_start_tx_dma(priv, queue);

spin_lock_irqsave(&ch->lock, flags);
stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
6887 | } |
6888 | |
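/* Tear down the data path for an XDP reconfiguration: stop TX queues and
 * NAPI, cancel the TX timers, free IRQs and DMA resources, and disable the
 * MAC. The interface itself stays administratively up.
 */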
6889 | void stmmac_xdp_release(struct net_device *dev) |
6890 | { |
6891 | struct stmmac_priv *priv = netdev_priv(dev); |
6892 | u32 chan; |
6893 | |
6894 | /* Ensure tx function is not running */ |
6895 | netif_tx_disable(dev); |
6896 | |
6897 | /* Disable NAPI process */ |
6898 | stmmac_disable_all_queues(priv); |
6899 | |
6900 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

/* Stop TX/RX DMA channels */
stmmac_stop_all_dma(priv);

/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
6911 | |
6912 | /* Disable the MAC Rx/Tx */ |
6913 | stmmac_mac_set(priv, priv->ioaddr, false); |
6914 | |
6915 | /* set trans_start so we don't get spurious |
6916 | * watchdogs during reset |
6917 | */ |
6918 | netif_trans_update(dev); |
6919 | netif_carrier_off(dev); |
6920 | } |
6921 | |
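/* Bring the data path back up after an XDP reconfiguration: reallocate the
 * descriptor rings, reprogram every DMA channel (CSR, RX buffer size or XSK
 * frame size, split header, TX tail pointers and timers), re-request the
 * IRQs and re-enable NAPI and the MAC.
 */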
6922 | int stmmac_xdp_open(struct net_device *dev) |
6923 | { |
6924 | struct stmmac_priv *priv = netdev_priv(dev); |
6925 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
6926 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
6927 | u32 dma_csr_ch = max(rx_cnt, tx_cnt); |
6928 | struct stmmac_rx_queue *rx_q; |
6929 | struct stmmac_tx_queue *tx_q; |
6930 | u32 buf_size; |
6931 | bool sph_en; |
6932 | u32 chan; |
6933 | int ret; |
6934 | |
ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}

ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6945 | __func__); |
6946 | goto init_error; |
6947 | } |
6948 | |
6949 | stmmac_reset_queues_param(priv); |
6950 | |
6951 | /* DMA CSR Channel configuration */ |
6952 | for (chan = 0; chan < dma_csr_ch; chan++) { |
6953 | stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); |
6954 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); |
6955 | } |
6956 | |
6957 | /* Adjust Split header */ |
6958 | sph_en = (priv->hw->rx_csum > 0) && priv->sph; |
6959 | |
6960 | /* DMA RX Channel Configuration */ |
6961 | for (chan = 0; chan < rx_cnt; chan++) { |
6962 | rx_q = &priv->dma_conf.rx_queue[chan]; |
6963 | |
6964 | stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
6965 | rx_q->dma_rx_phy, chan); |
6966 | |
6967 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
6968 | (rx_q->buf_alloc_num * |
6969 | sizeof(struct dma_desc)); |
6970 | stmmac_set_rx_tail_ptr(priv, priv->ioaddr, |
6971 | rx_q->rx_tail_addr, chan); |
6972 | |
6973 | if (rx_q->xsk_pool && rx_q->buf_alloc_num) { |
buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6975 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
6976 | buf_size, |
6977 | rx_q->queue_index); |
6978 | } else { |
6979 | stmmac_set_dma_bfsize(priv, priv->ioaddr, |
6980 | priv->dma_conf.dma_buf_sz, |
6981 | rx_q->queue_index); |
6982 | } |
6983 | |
6984 | stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); |
6985 | } |
6986 | |
6987 | /* DMA TX Channel Configuration */ |
6988 | for (chan = 0; chan < tx_cnt; chan++) { |
6989 | tx_q = &priv->dma_conf.tx_queue[chan]; |
6990 | |
6991 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
6992 | tx_q->dma_tx_phy, chan); |
6993 | |
6994 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
6995 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
6996 | tx_q->tx_tail_addr, chan); |
6997 | |
hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6999 | tx_q->txtimer.function = stmmac_tx_timer; |
7000 | } |
7001 | |
7002 | /* Enable the MAC Rx/Tx */ |
7003 | stmmac_mac_set(priv, priv->ioaddr, true); |
7004 | |
7005 | /* Start Rx & Tx DMA Channels */ |
7006 | stmmac_start_all_dma(priv); |
7007 | |
7008 | ret = stmmac_request_irq(dev); |
7009 | if (ret) |
7010 | goto irq_error; |
7011 | |
/* Enable NAPI process */
7013 | stmmac_enable_all_queues(priv); |
7014 | netif_carrier_on(dev); |
7015 | netif_tx_start_all_queues(dev); |
7016 | stmmac_enable_all_dma_irq(priv); |
7017 | |
7018 | return 0; |
7019 | |
7020 | irq_error: |
7021 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
7027 | dma_desc_error: |
7028 | return ret; |
7029 | } |
7030 | |
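/* .ndo_xsk_wakeup handler: kick the rxtx NAPI context of @queue so that
 * pending AF_XDP RX refills or TX descriptors are processed.
 */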
7031 | int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) |
7032 | { |
7033 | struct stmmac_priv *priv = netdev_priv(dev); |
7034 | struct stmmac_rx_queue *rx_q; |
7035 | struct stmmac_tx_queue *tx_q; |
7036 | struct stmmac_channel *ch; |
7037 | |
7038 | if (test_bit(STMMAC_DOWN, &priv->state) || |
!netif_carrier_ok(priv->dev))
7040 | return -ENETDOWN; |
7041 | |
7042 | if (!stmmac_xdp_is_enabled(priv)) |
7043 | return -EINVAL; |
7044 | |
7045 | if (queue >= priv->plat->rx_queues_to_use || |
7046 | queue >= priv->plat->tx_queues_to_use) |
7047 | return -EINVAL; |
7048 | |
7049 | rx_q = &priv->dma_conf.rx_queue[queue]; |
7050 | tx_q = &priv->dma_conf.tx_queue[queue]; |
7051 | ch = &priv->channel[queue]; |
7052 | |
7053 | if (!rx_q->xsk_pool && !tx_q->xsk_pool) |
7054 | return -EINVAL; |
7055 | |
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
* so we schedule RX Napi straight-away.
*/
if (likely(napi_schedule_prep(&ch->rxtx_napi)))
__napi_schedule(&ch->rxtx_napi);
7062 | } |
7063 | |
7064 | return 0; |
7065 | } |
7066 | |
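/* .ndo_get_stats64 handler: fold the per-queue u64_stats counters and the
 * error counters kept in priv->xstats into @stats.
 */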
7067 | static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
7068 | { |
7069 | struct stmmac_priv *priv = netdev_priv(dev); |
7070 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
7071 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
7072 | unsigned int start; |
7073 | int q; |
7074 | |
7075 | for (q = 0; q < tx_cnt; q++) { |
7076 | struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; |
7077 | u64 tx_packets; |
7078 | u64 tx_bytes; |
7079 | |
7080 | do { |
start = u64_stats_fetch_begin(&txq_stats->q_syncp);
tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
do {
start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7088 | |
7089 | stats->tx_packets += tx_packets; |
7090 | stats->tx_bytes += tx_bytes; |
7091 | } |
7092 | |
7093 | for (q = 0; q < rx_cnt; q++) { |
7094 | struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; |
7095 | u64 rx_packets; |
7096 | u64 rx_bytes; |
7097 | |
7098 | do { |
start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7103 | |
7104 | stats->rx_packets += rx_packets; |
7105 | stats->rx_bytes += rx_bytes; |
7106 | } |
7107 | |
7108 | stats->rx_dropped = priv->xstats.rx_dropped; |
7109 | stats->rx_errors = priv->xstats.rx_errors; |
7110 | stats->tx_dropped = priv->xstats.tx_dropped; |
7111 | stats->tx_errors = priv->xstats.tx_errors; |
7112 | stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; |
7113 | stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; |
7114 | stats->rx_length_errors = priv->xstats.rx_length; |
7115 | stats->rx_crc_errors = priv->xstats.rx_crc_errors; |
7116 | stats->rx_over_errors = priv->xstats.rx_overflow_cntr; |
7117 | stats->rx_missed_errors = priv->xstats.rx_missed_cntr; |
7118 | } |
7119 | |
7120 | static const struct net_device_ops stmmac_netdev_ops = { |
7121 | .ndo_open = stmmac_open, |
7122 | .ndo_start_xmit = stmmac_xmit, |
7123 | .ndo_stop = stmmac_release, |
7124 | .ndo_change_mtu = stmmac_change_mtu, |
7125 | .ndo_fix_features = stmmac_fix_features, |
7126 | .ndo_set_features = stmmac_set_features, |
7127 | .ndo_set_rx_mode = stmmac_set_rx_mode, |
7128 | .ndo_tx_timeout = stmmac_tx_timeout, |
7129 | .ndo_eth_ioctl = stmmac_ioctl, |
7130 | .ndo_get_stats64 = stmmac_get_stats64, |
7131 | .ndo_setup_tc = stmmac_setup_tc, |
7132 | .ndo_select_queue = stmmac_select_queue, |
7133 | .ndo_set_mac_address = stmmac_set_mac_address, |
7134 | .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, |
7135 | .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, |
7136 | .ndo_bpf = stmmac_bpf, |
7137 | .ndo_xdp_xmit = stmmac_xdp_xmit, |
7138 | .ndo_xsk_wakeup = stmmac_xsk_wakeup, |
7139 | }; |
7140 | |
7141 | static void stmmac_reset_subtask(struct stmmac_priv *priv) |
7142 | { |
if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
return;
if (test_bit(STMMAC_DOWN, &priv->state))
return;

netdev_err(priv->dev, "Reset adapter.\n");

rtnl_lock();
netif_trans_update(priv->dev);
while (test_and_set_bit(STMMAC_RESETING, &priv->state))
usleep_range(1000, 2000);

set_bit(STMMAC_DOWN, &priv->state);
dev_close(priv->dev);
dev_open(priv->dev, NULL);
clear_bit(STMMAC_DOWN, &priv->state);
clear_bit(STMMAC_RESETING, &priv->state);
7160 | rtnl_unlock(); |
7161 | } |
7162 | |
7163 | static void stmmac_service_task(struct work_struct *work) |
7164 | { |
7165 | struct stmmac_priv *priv = container_of(work, struct stmmac_priv, |
7166 | service_task); |
7167 | |
7168 | stmmac_reset_subtask(priv); |
clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7170 | } |
7171 | |
7172 | /** |
7173 | * stmmac_hw_init - Init the MAC device |
7174 | * @priv: driver private structure |
* Description: this function configures the MAC device according to
* platform parameters and the HW capability register. It prepares the
7177 | * driver to use either ring or chain modes and to setup either enhanced or |
7178 | * normal descriptors. |
7179 | */ |
7180 | static int stmmac_hw_init(struct stmmac_priv *priv) |
7181 | { |
7182 | int ret; |
7183 | |
/* dwmac-sun8i only works in chain mode */
7185 | if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) |
7186 | chain_mode = 1; |
7187 | priv->chain_mode = chain_mode; |
7188 | |
7189 | /* Initialize HW Interface */ |
7190 | ret = stmmac_hwif_init(priv); |
7191 | if (ret) |
7192 | return ret; |
7193 | |
7194 | /* Get the HW capability (new GMAC newer than 3.50a) */ |
7195 | priv->hw_cap_support = stmmac_get_hw_features(priv); |
7196 | if (priv->hw_cap_support) { |
dev_info(priv->device, "DMA HW capability register supported\n");
7198 | |
7199 | /* We can override some gmac/dma configuration fields: e.g. |
7200 | * enh_desc, tx_coe (e.g. that are passed through the |
7201 | * platform) with the values from the HW capability |
7202 | * register (if supported). |
7203 | */ |
7204 | priv->plat->enh_desc = priv->dma_cap.enh_desc; |
7205 | priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && |
7206 | !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); |
7207 | priv->hw->pmt = priv->plat->pmt; |
7208 | if (priv->dma_cap.hash_tb_sz) { |
7209 | priv->hw->multicast_filter_bins = |
7210 | (BIT(priv->dma_cap.hash_tb_sz) << 5); |
7211 | priv->hw->mcast_bits_log2 = |
7212 | ilog2(priv->hw->multicast_filter_bins); |
7213 | } |
7214 | |
7215 | /* TXCOE doesn't work in thresh DMA mode */ |
7216 | if (priv->plat->force_thresh_dma_mode) |
7217 | priv->plat->tx_coe = 0; |
7218 | else |
7219 | priv->plat->tx_coe = priv->dma_cap.tx_coe; |
7220 | |
7221 | /* In case of GMAC4 rx_coe is from HW cap register. */ |
7222 | priv->plat->rx_coe = priv->dma_cap.rx_coe; |
7223 | |
7224 | if (priv->dma_cap.rx_coe_type2) |
7225 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; |
7226 | else if (priv->dma_cap.rx_coe_type1) |
7227 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; |
7228 | |
7229 | } else { |
dev_info(priv->device, "No HW DMA feature register supported\n");
7231 | } |
7232 | |
7233 | if (priv->plat->rx_coe) { |
7234 | priv->hw->rx_csum = priv->plat->rx_coe; |
dev_info(priv->device, "RX Checksum Offload Engine supported\n");
if (priv->synopsys_id < DWMAC_CORE_4_00)
dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
dev_info(priv->device, "TX Checksum insertion supported\n");

if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
7245 | } |
7246 | |
7247 | if (priv->dma_cap.tsoen) |
dev_info(priv->device, "TSO supported\n");
7249 | |
7250 | priv->hw->vlan_fail_q_en = |
7251 | (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); |
7252 | priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; |
7253 | |
7254 | /* Run HW quirks, if any */ |
7255 | if (priv->hwif_quirks) { |
7256 | ret = priv->hwif_quirks(priv); |
7257 | if (ret) |
7258 | return ret; |
7259 | } |
7260 | |
7261 | /* Rx Watchdog is available in the COREs newer than the 3.40. |
* In some cases, for example on buggy HW, this feature
* has to be disabled, and this can be done by passing the
7264 | * riwt_off field from the platform. |
7265 | */ |
7266 | if (((priv->synopsys_id >= DWMAC_CORE_3_50) || |
7267 | (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { |
7268 | priv->use_riwt = 1; |
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
7271 | } |
7272 | |
7273 | return 0; |
7274 | } |
7275 | |
7276 | static void stmmac_napi_add(struct net_device *dev) |
7277 | { |
7278 | struct stmmac_priv *priv = netdev_priv(dev); |
7279 | u32 queue, maxq; |
7280 | |
7281 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
7282 | |
7283 | for (queue = 0; queue < maxq; queue++) { |
7284 | struct stmmac_channel *ch = &priv->channel[queue]; |
7285 | |
7286 | ch->priv_data = priv; |
7287 | ch->index = queue; |
7288 | spin_lock_init(&ch->lock); |
7289 | |
if (queue < priv->plat->rx_queues_to_use)
netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
if (queue < priv->plat->tx_queues_to_use)
netif_napi_add_tx(dev, &ch->tx_napi,
stmmac_napi_poll_tx);
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
stmmac_napi_poll_rxtx);
}
7302 | } |
7303 | } |
7304 | |
7305 | static void stmmac_napi_del(struct net_device *dev) |
7306 | { |
7307 | struct stmmac_priv *priv = netdev_priv(dev); |
7308 | u32 queue, maxq; |
7309 | |
7310 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
7311 | |
7312 | for (queue = 0; queue < maxq; queue++) { |
7313 | struct stmmac_channel *ch = &priv->channel[queue]; |
7314 | |
7315 | if (queue < priv->plat->rx_queues_to_use) |
netif_napi_del(&ch->rx_napi);
if (queue < priv->plat->tx_queues_to_use)
netif_napi_del(&ch->tx_napi);
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_del(&ch->rxtx_napi);
7322 | } |
7323 | } |
7324 | } |
7325 | |
7326 | int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) |
7327 | { |
7328 | struct stmmac_priv *priv = netdev_priv(dev); |
7329 | int ret = 0, i; |
7330 | int max_speed; |
7331 | |
7332 | if (netif_running(dev)) |
7333 | stmmac_release(dev); |
7334 | |
7335 | stmmac_napi_del(dev); |
7336 | |
7337 | priv->plat->rx_queues_to_use = rx_cnt; |
7338 | priv->plat->tx_queues_to_use = tx_cnt; |
7339 | if (!netif_is_rxfh_configured(dev)) |
7340 | for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) |
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
7343 | |
7344 | stmmac_mac_phylink_get_caps(priv); |
7345 | |
7346 | priv->phylink_config.mac_capabilities = priv->hw->link.caps; |
7347 | |
7348 | max_speed = priv->plat->max_speed; |
7349 | if (max_speed) |
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
7351 | |
7352 | stmmac_napi_add(dev); |
7353 | |
7354 | if (netif_running(dev)) |
7355 | ret = stmmac_open(dev); |
7356 | |
7357 | return ret; |
7358 | } |
7359 | |
7360 | int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) |
7361 | { |
7362 | struct stmmac_priv *priv = netdev_priv(dev); |
7363 | int ret = 0; |
7364 | |
7365 | if (netif_running(dev)) |
7366 | stmmac_release(dev); |
7367 | |
7368 | priv->dma_conf.dma_rx_size = rx_size; |
7369 | priv->dma_conf.dma_tx_size = tx_size; |
7370 | |
7371 | if (netif_running(dev)) |
7372 | ret = stmmac_open(dev); |
7373 | |
7374 | return ret; |
7375 | } |
7376 | |
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
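/* Frame Preemption handshake worker: periodically send Verify mPackets to
 * the link partner until both the local and remote stations report FPE ON,
 * then enable FPE in hardware. Gives up after 20 attempts (~10s).
 */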
7378 | static void stmmac_fpe_lp_task(struct work_struct *work) |
7379 | { |
7380 | struct stmmac_priv *priv = container_of(work, struct stmmac_priv, |
7381 | fpe_task); |
7382 | struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; |
7383 | enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; |
7384 | enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; |
7385 | bool *hs_enable = &fpe_cfg->hs_enable; |
7386 | bool *enable = &fpe_cfg->enable; |
7387 | int retries = 20; |
7388 | |
7389 | while (retries-- > 0) { |
7390 | /* Bail out immediately if FPE handshake is OFF */ |
7391 | if (*lo_state == FPE_STATE_OFF || !*hs_enable) |
7392 | break; |
7393 | |
7394 | if (*lo_state == FPE_STATE_ENTERING_ON && |
7395 | *lp_state == FPE_STATE_ENTERING_ON) { |
7396 | stmmac_fpe_configure(priv, priv->ioaddr, |
7397 | fpe_cfg, |
7398 | priv->plat->tx_queues_to_use, |
7399 | priv->plat->rx_queues_to_use, |
7400 | *enable); |
7401 | |
netdev_info(priv->dev, "configured FPE\n");

*lo_state = FPE_STATE_ON;
*lp_state = FPE_STATE_ON;
netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7407 | break; |
7408 | } |
7409 | |
7410 | if ((*lo_state == FPE_STATE_CAPABLE || |
7411 | *lo_state == FPE_STATE_ENTERING_ON) && |
7412 | *lp_state != FPE_STATE_ON) { |
netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
*lo_state, *lp_state);
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
fpe_cfg,
MPACKET_VERIFY);
}
/* Sleep then retry */
msleep(500);
}

clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7424 | } |
7425 | |
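/* Start or stop the FPE verification handshake. When enabling, transmit an
 * initial Verify mPacket; when disabling, reset both the local and link
 * partner handshake states to FPE_STATE_OFF.
 */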
7426 | void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) |
7427 | { |
7428 | if (priv->plat->fpe_cfg->hs_enable != enable) { |
7429 | if (enable) { |
7430 | stmmac_fpe_send_mpacket(priv, priv->ioaddr, |
7431 | priv->plat->fpe_cfg, |
7432 | MPACKET_VERIFY); |
7433 | } else { |
7434 | priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; |
7435 | priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; |
7436 | } |
7437 | |
7438 | priv->plat->fpe_cfg->hs_enable = enable; |
7439 | } |
7440 | } |
7441 | |
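/* XDP RX metadata hook: extract the hardware RX timestamp for the current
 * frame. On GMAC4/XGMAC the timestamp lives in the context (next)
 * descriptor; apply the CDC error adjustment before reporting it.
 */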
7442 | static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) |
7443 | { |
7444 | const struct stmmac_xdp_buff *ctx = (void *)_ctx; |
7445 | struct dma_desc *desc_contains_ts = ctx->desc; |
7446 | struct stmmac_priv *priv = ctx->priv; |
7447 | struct dma_desc *ndesc = ctx->ndesc; |
7448 | struct dma_desc *desc = ctx->desc; |
7449 | u64 ns = 0; |
7450 | |
7451 | if (!priv->hwts_rx_en) |
7452 | return -ENODATA; |
7453 | |
7454 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
7455 | if (priv->plat->has_gmac4 || priv->plat->has_xgmac) |
7456 | desc_contains_ts = ndesc; |
7457 | |
7458 | /* Check if timestamp is available */ |
7459 | if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { |
7460 | stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); |
7461 | ns -= priv->plat->cdc_error_adj; |
7462 | *timestamp = ns_to_ktime(ns); |
7463 | return 0; |
7464 | } |
7465 | |
7466 | return -ENODATA; |
7467 | } |
7468 | |
7469 | static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { |
7470 | .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, |
7471 | }; |
7472 | |
7473 | /** |
7474 | * stmmac_dvr_probe |
7475 | * @device: device pointer |
7476 | * @plat_dat: platform data pointer |
7477 | * @res: stmmac resource pointer |
* Description: this is the main probe function used to
* call alloc_etherdev and allocate the private structure.
7480 | * Return: |
7481 | * returns 0 on success, otherwise errno. |
7482 | */ |
7483 | int stmmac_dvr_probe(struct device *device, |
7484 | struct plat_stmmacenet_data *plat_dat, |
7485 | struct stmmac_resources *res) |
7486 | { |
7487 | struct net_device *ndev = NULL; |
7488 | struct stmmac_priv *priv; |
7489 | u32 rxq; |
7490 | int i, ret = 0; |
7491 | |
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7493 | MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); |
7494 | if (!ndev) |
7495 | return -ENOMEM; |
7496 | |
7497 | SET_NETDEV_DEV(ndev, device); |
7498 | |
priv = netdev_priv(ndev);
7500 | priv->device = device; |
7501 | priv->dev = ndev; |
7502 | |
7503 | for (i = 0; i < MTL_MAX_RX_QUEUES; i++) |
u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7508 | } |
7509 | |
7510 | priv->xstats.pcpu_stats = |
7511 | devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); |
7512 | if (!priv->xstats.pcpu_stats) |
7513 | return -ENOMEM; |
7514 | |
stmmac_set_ethtool_ops(ndev);
7516 | priv->pause = pause; |
7517 | priv->plat = plat_dat; |
7518 | priv->ioaddr = res->addr; |
7519 | priv->dev->base_addr = (unsigned long)res->addr; |
7520 | priv->plat->dma_cfg->multi_msi_en = |
7521 | (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); |
7522 | |
7523 | priv->dev->irq = res->irq; |
7524 | priv->wol_irq = res->wol_irq; |
7525 | priv->lpi_irq = res->lpi_irq; |
7526 | priv->sfty_irq = res->sfty_irq; |
7527 | priv->sfty_ce_irq = res->sfty_ce_irq; |
7528 | priv->sfty_ue_irq = res->sfty_ue_irq; |
7529 | for (i = 0; i < MTL_MAX_RX_QUEUES; i++) |
7530 | priv->rx_irq[i] = res->rx_irq[i]; |
7531 | for (i = 0; i < MTL_MAX_TX_QUEUES; i++) |
7532 | priv->tx_irq[i] = res->tx_irq[i]; |
7533 | |
if (!is_zero_ether_addr(res->mac))
eth_hw_addr_set(priv->dev, res->mac);

dev_set_drvdata(device, priv->dev);
7538 | |
7539 | /* Verify driver arguments */ |
7540 | stmmac_verify_args(); |
7541 | |
7542 | priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); |
7543 | if (!priv->af_xdp_zc_qps) |
7544 | return -ENOMEM; |
7545 | |
7546 | /* Allocate workqueue */ |
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
7550 | ret = -ENOMEM; |
7551 | goto error_wq_init; |
7552 | } |
7553 | |
7554 | INIT_WORK(&priv->service_task, stmmac_service_task); |
7555 | |
7556 | /* Initialize Link Partner FPE workqueue */ |
7557 | INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); |
7558 | |
7559 | /* Override with kernel parameters if supplied XXX CRS XXX |
7560 | * this needs to have multiple instances |
7561 | */ |
7562 | if ((phyaddr >= 0) && (phyaddr <= 31)) |
7563 | priv->plat->phy_addr = phyaddr; |
7564 | |
7565 | if (priv->plat->stmmac_rst) { |
ret = reset_control_assert(priv->plat->stmmac_rst);
reset_control_deassert(priv->plat->stmmac_rst);
/* Some reset controllers have only reset callback instead of
* assert + deassert callbacks pair.
*/
if (ret == -ENOTSUPP)
reset_control_reset(priv->plat->stmmac_rst);
}

ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
if (ret == -ENOTSUPP)
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7578 | ERR_PTR(ret)); |
7579 | |
7580 | /* Wait a bit for the reset to take effect */ |
7581 | udelay(10); |
7582 | |
7583 | /* Init MAC and get the capabilities */ |
7584 | ret = stmmac_hw_init(priv); |
7585 | if (ret) |
7586 | goto error_hw_init; |
7587 | |
7588 | /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. |
7589 | */ |
7590 | if (priv->synopsys_id < DWMAC_CORE_5_20) |
7591 | priv->plat->dma_cfg->dche = false; |
7592 | |
7593 | stmmac_check_ether_addr(priv); |
7594 | |
7595 | ndev->netdev_ops = &stmmac_netdev_ops; |
7596 | |
7597 | ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; |
7598 | ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; |
7599 | |
7600 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
7601 | NETIF_F_RXCSUM; |
7602 | ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
7603 | NETDEV_XDP_ACT_XSK_ZEROCOPY; |
7604 | |
ret = stmmac_tc_init(priv, priv);
if (!ret)
ndev->hw_features |= NETIF_F_HW_TC;
7609 | |
7610 | if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { |
7611 | ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; |
7612 | if (priv->plat->has_gmac4) |
7613 | ndev->hw_features |= NETIF_F_GSO_UDP_L4; |
7614 | priv->tso = true; |
dev_info(priv->device, "TSO feature enabled\n");
7616 | } |
7617 | |
7618 | if (priv->dma_cap.sphen && |
7619 | !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { |
7620 | ndev->hw_features |= NETIF_F_GRO; |
7621 | priv->sph_cap = true; |
7622 | priv->sph = priv->sph_cap; |
dev_info(priv->device, "SPH feature enabled\n");
7624 | } |
7625 | |
7626 | /* Ideally our host DMA address width is the same as for the |
7627 | * device. However, it may differ and then we have to use our |
7628 | * host DMA width for allocation and the device DMA width for |
7629 | * register handling. |
7630 | */ |
7631 | if (priv->plat->host_dma_width) |
7632 | priv->dma_cap.host_dma_width = priv->plat->host_dma_width; |
7633 | else |
7634 | priv->dma_cap.host_dma_width = priv->dma_cap.addr64; |
7635 | |
7636 | if (priv->dma_cap.host_dma_width) { |
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.host_dma_width));
if (!ret) {
dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7641 | priv->dma_cap.host_dma_width, priv->dma_cap.addr64); |
7642 | |
7643 | /* |
7644 | * If more than 32 bits can be addressed, make sure to |
7645 | * enable enhanced addressing mode. |
7646 | */ |
7647 | if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) |
7648 | priv->plat->dma_cfg->eame = true; |
7649 | } else { |
ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
if (ret) {
dev_err(priv->device, "Failed to set DMA Mask\n");
7653 | goto error_hw_init; |
7654 | } |
7655 | |
7656 | priv->dma_cap.host_dma_width = 32; |
7657 | } |
7658 | } |
7659 | |
7660 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7662 | #ifdef STMMAC_VLAN_TAG_USED |
7663 | /* Both mac100 and gmac support receive VLAN tag detection */ |
7664 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; |
7665 | ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; |
7666 | priv->hw->hw_vlan_en = true; |
7667 | |
7668 | if (priv->dma_cap.vlhash) { |
7669 | ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
7670 | ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; |
7671 | } |
7672 | if (priv->dma_cap.vlins) { |
7673 | ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; |
7674 | if (priv->dma_cap.dvlan) |
7675 | ndev->features |= NETIF_F_HW_VLAN_STAG_TX; |
7676 | } |
7677 | #endif |
priv->msg_enable = netif_msg_init(debug, default_msg_level);
7679 | |
7680 | priv->xstats.threshold = tc; |
7681 | |
7682 | /* Initialize RSS */ |
7683 | rxq = priv->plat->rx_queues_to_use; |
netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7687 | |
7688 | if (priv->dma_cap.rssen && priv->plat->rss_en) |
7689 | ndev->features |= NETIF_F_RXHASH; |
7690 | |
7691 | ndev->vlan_features |= ndev->features; |
7692 | /* TSO doesn't work on VLANs yet */ |
7693 | ndev->vlan_features &= ~NETIF_F_TSO; |
7694 | |
7695 | /* MTU range: 46 - hw-specific max */ |
7696 | ndev->min_mtu = ETH_ZLEN - ETH_HLEN; |
7697 | if (priv->plat->has_xgmac) |
7698 | ndev->max_mtu = XGMAC_JUMBO_LEN; |
7699 | else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) |
7700 | ndev->max_mtu = JUMBO_LEN; |
7701 | else |
7702 | ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); |
7703 | /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu |
* as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
7705 | */ |
7706 | if ((priv->plat->maxmtu < ndev->max_mtu) && |
7707 | (priv->plat->maxmtu >= ndev->min_mtu)) |
7708 | ndev->max_mtu = priv->plat->maxmtu; |
7709 | else if (priv->plat->maxmtu < ndev->min_mtu) |
dev_warn(priv->device,
"%s: warning: maxmtu having invalid value (%d)\n",
7712 | __func__, priv->plat->maxmtu); |
7713 | |
7714 | if (flow_ctrl) |
7715 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ |
7716 | |
7717 | ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
7718 | |
7719 | /* Setup channels NAPI */ |
stmmac_napi_add(ndev);
7721 | |
7722 | mutex_init(&priv->lock); |
7723 | |
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Otherwise the driver
* will try to set the MDC clock dynamically according to the
* actual csr clock input.
*/
7730 | if (priv->plat->clk_csr >= 0) |
7731 | priv->clk_csr = priv->plat->clk_csr; |
7732 | else |
7733 | stmmac_clk_csr_set(priv); |
7734 | |
7735 | stmmac_check_pcs_mode(priv); |
7736 | |
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
if (!pm_runtime_enabled(device))
pm_runtime_enable(device);
7741 | |
7742 | if (priv->hw->pcs != STMMAC_PCS_TBI && |
7743 | priv->hw->pcs != STMMAC_PCS_RTBI) { |
7744 | /* MDIO bus Registration */ |
7745 | ret = stmmac_mdio_register(ndev); |
7746 | if (ret < 0) { |
dev_err_probe(priv->device, ret,
"%s: MDIO bus (id: %d) registration failed\n",
__func__, priv->plat->bus_id);
7750 | goto error_mdio_register; |
7751 | } |
7752 | } |
7753 | |
7754 | if (priv->plat->speed_mode_2500) |
7755 | priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); |
7756 | |
7757 | if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { |
ret = stmmac_xpcs_setup(priv->mii);
7759 | if (ret) |
7760 | goto error_xpcs_setup; |
7761 | } |
7762 | |
7763 | ret = stmmac_phy_setup(priv); |
7764 | if (ret) { |
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
goto error_phy_setup;
}

ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
7772 | __func__, ret); |
7773 | goto error_netdev_register; |
7774 | } |
7775 | |
7776 | #ifdef CONFIG_DEBUG_FS |
stmmac_init_fs(ndev);
7778 | #endif |
7779 | |
7780 | if (priv->plat->dump_debug_regs) |
7781 | priv->plat->dump_debug_regs(priv->plat->bsp_priv); |
7782 | |
7783 | /* Let pm_runtime_put() disable the clocks. |
7784 | * If CONFIG_PM is not enabled, the clocks will stay powered. |
7785 | */ |
pm_runtime_put(device);
7787 | |
7788 | return ret; |
7789 | |
7790 | error_netdev_register: |
7791 | phylink_destroy(priv->phylink); |
7792 | error_xpcs_setup: |
7793 | error_phy_setup: |
7794 | if (priv->hw->pcs != STMMAC_PCS_TBI && |
7795 | priv->hw->pcs != STMMAC_PCS_RTBI) |
7796 | stmmac_mdio_unregister(ndev); |
7797 | error_mdio_register: |
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
error_wq_init:
bitmap_free(priv->af_xdp_zc_qps);
7803 | |
7804 | return ret; |
7805 | } |
7806 | EXPORT_SYMBOL_GPL(stmmac_dvr_probe); |
7807 | |
7808 | /** |
7809 | * stmmac_dvr_remove |
7810 | * @dev: device pointer |
* Description: this function resets the TX/RX processes, disables the MAC
* RX/TX, changes the link status and releases the DMA descriptor rings.
7813 | */ |
7814 | void stmmac_dvr_remove(struct device *dev) |
7815 | { |
7816 | struct net_device *ndev = dev_get_drvdata(dev); |
struct stmmac_priv *priv = netdev_priv(ndev);

netdev_info(priv->dev, "%s: removing driver", __func__);
7820 | |
7821 | pm_runtime_get_sync(dev); |
7822 | |
7823 | stmmac_stop_all_dma(priv); |
7824 | stmmac_mac_set(priv, priv->ioaddr, false); |
netif_carrier_off(ndev);
unregister_netdev(ndev);
7827 | |
7828 | #ifdef CONFIG_DEBUG_FS |
stmmac_exit_fs(ndev);
7830 | #endif |
7831 | phylink_destroy(priv->phylink); |
7832 | if (priv->plat->stmmac_rst) |
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
7841 | |
7842 | pm_runtime_disable(dev); |
7843 | pm_runtime_put_noidle(dev); |
7844 | } |
7845 | EXPORT_SYMBOL_GPL(stmmac_dvr_remove); |
7846 | |
7847 | /** |
7848 | * stmmac_suspend - suspend callback |
7849 | * @dev: device pointer |
7850 | * Description: this is the function to suspend the device and it is called |
7851 | * by the platform driver to stop the network queue, release the resources, |
7852 | * program the PMT register (for WoL), clean and release driver resources. |
7853 | */ |
7854 | int stmmac_suspend(struct device *dev) |
7855 | { |
7856 | struct net_device *ndev = dev_get_drvdata(dev); |
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;

if (!ndev || !netif_running(ndev))
return 0;

mutex_lock(&priv->lock);

netif_device_detach(ndev);
7866 | |
7867 | stmmac_disable_all_queues(priv); |
7868 | |
7869 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
7875 | } |
7876 | |
7877 | /* Stop TX/RX DMA */ |
7878 | stmmac_stop_all_dma(priv); |
7879 | |
7880 | if (priv->plat->serdes_powerdown) |
7881 | priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); |
7882 | |
7883 | /* Enable Power down mode by programming the PMT regs */ |
if (device_may_wakeup(priv->device) && priv->plat->pmt) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
}

mutex_unlock(&priv->lock);
7893 | |
7894 | rtnl_lock(); |
if (device_may_wakeup(priv->device) && priv->plat->pmt) {
phylink_suspend(priv->phylink, true);
} else {
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
phylink_suspend(priv->phylink, false);
7901 | } |
7902 | rtnl_unlock(); |
7903 | |
7904 | if (priv->dma_cap.fpesel) { |
7905 | /* Disable FPE */ |
7906 | stmmac_fpe_configure(priv, priv->ioaddr, |
7907 | priv->plat->fpe_cfg, |
7908 | priv->plat->tx_queues_to_use, |
7909 | priv->plat->rx_queues_to_use, false); |
7910 | |
7911 | stmmac_fpe_handshake(priv, enable: false); |
7912 | stmmac_fpe_stop_wq(priv); |
7913 | } |
7914 | |
7915 | priv->speed = SPEED_UNKNOWN; |
7916 | return 0; |
7917 | } |
7918 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
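
/* Note: the wolopts value programmed into the PMT block above comes from
 * ethtool; e.g. "ethtool -s eth0 wol g" requests magic-packet wake-up,
 * in which case this suspend path leaves the MAC powered and armed for
 * wake-up instead of disabling it.
 */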

static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and
 * core in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received. Even so,
	 * it is better to clear this bit manually because it can cause
	 * problems while resuming from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
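
/* Illustrative only: these callbacks are meant to be wired into a
 * dev_pm_ops table by the bus glue so the PM core drives them across
 * system sleep. A minimal sketch, assuming no extra bus-level work is
 * needed (real glue such as stmmac_platform.c wraps them with clock
 * handling instead of using them directly):
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 */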

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
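
/* Example: with the driver built in, the options parsed above can be
 * passed on the kernel command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000
 *
 * Each option maps to the module parameter of the same name.
 */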
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");