1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* 10G controller driver for Samsung SoCs |
3 | * |
4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. |
5 | * http://www.samsung.com |
6 | * |
7 | * Author: Siva Reddy Kallam <siva.kallam@samsung.com> |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/clk.h> |
13 | #include <linux/crc32.h> |
14 | #include <linux/dma-mapping.h> |
15 | #include <linux/etherdevice.h> |
16 | #include <linux/ethtool.h> |
17 | #include <linux/if.h> |
18 | #include <linux/if_ether.h> |
19 | #include <linux/if_vlan.h> |
20 | #include <linux/init.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/ip.h> |
23 | #include <linux/kernel.h> |
24 | #include <linux/mii.h> |
25 | #include <linux/module.h> |
26 | #include <linux/net_tstamp.h> |
27 | #include <linux/netdevice.h> |
28 | #include <linux/phy.h> |
29 | #include <linux/platform_device.h> |
30 | #include <linux/prefetch.h> |
31 | #include <linux/skbuff.h> |
32 | #include <linux/slab.h> |
33 | #include <linux/tcp.h> |
34 | #include <linux/sxgbe_platform.h> |
35 | |
36 | #include "sxgbe_common.h" |
37 | #include "sxgbe_desc.h" |
38 | #include "sxgbe_dma.h" |
39 | #include "sxgbe_mtl.h" |
40 | #include "sxgbe_reg.h" |
41 | |
42 | #define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x) |
43 | #define JUMBO_LEN 9000 |
44 | |
45 | /* Module parameters */ |
46 | #define TX_TIMEO 5000 |
47 | #define DMA_TX_SIZE 512 |
48 | #define DMA_RX_SIZE 1024 |
49 | #define TC_DEFAULT 64 |
50 | #define DMA_BUFFER_SIZE BUF_SIZE_2KiB |
/* The default timer value, as per the sxgbe specification, is 1 sec (1000 ms) */
52 | #define SXGBE_DEFAULT_LPI_TIMER 1000 |
53 | |
54 | static int debug = -1; |
55 | static int eee_timer = SXGBE_DEFAULT_LPI_TIMER; |
56 | |
57 | module_param(eee_timer, int, 0644); |
58 | |
59 | module_param(debug, int, 0644); |
60 | static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | |
61 | NETIF_MSG_LINK | NETIF_MSG_IFUP | |
62 | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); |
63 | |
64 | static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id); |
65 | static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id); |
66 | static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id); |
67 | |
68 | #define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) |
69 | |
70 | #define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) |
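
/* A worked example with the module default (an assumption for illustration,
 * since eee_timer is tunable): with eee_timer = 1000,
 * SXGBE_LPI_TIMER(eee_timer) evaluates to jiffies + msecs_to_jiffies(1000),
 * i.e. an absolute expiry one second from now, which is the value the EEE
 * control timer below is (re)armed with.
 */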
71 | |
72 | /** |
73 | * sxgbe_verify_args - verify the driver parameters. |
 * Description: it verifies whether any wrong parameter was passed to the
 * driver. Note that wrong parameters are replaced with the default values.
76 | */ |
77 | static void sxgbe_verify_args(void) |
78 | { |
79 | if (unlikely(eee_timer < 0)) |
80 | eee_timer = SXGBE_DEFAULT_LPI_TIMER; |
81 | } |
82 | |
83 | static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv) |
84 | { |
85 | /* Check and enter in LPI mode */ |
86 | if (!priv->tx_path_in_lpi_mode) |
87 | priv->hw->mac->set_eee_mode(priv->ioaddr); |
88 | } |
89 | |
90 | void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv) |
91 | { |
	/* Exit and disable EEE if we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
95 | priv->tx_path_in_lpi_mode = false; |
96 | } |
97 | |
98 | /** |
99 | * sxgbe_eee_ctrl_timer |
 * @t: pointer to the timer_list embedded in the driver private data
101 | * Description: |
102 | * If there is no data transfer and if we are not in LPI state, |
103 | * then MAC Transmitter can be moved to LPI state. |
104 | */ |
105 | static void sxgbe_eee_ctrl_timer(struct timer_list *t) |
106 | { |
107 | struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer); |
108 | |
109 | sxgbe_enable_eee_mode(priv); |
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
111 | } |
112 | |
113 | /** |
114 | * sxgbe_eee_init |
115 | * @priv: private device pointer |
116 | * Description: |
 * If EEE support was enabled while configuring the driver, and if
 * the MAC actually supports EEE (from the HW cap reg) and the PHY
 * can also manage EEE, then enable the LPI state and start the timer
 * to verify whether the TX path can enter the LPI state.
121 | */ |
122 | bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) |
123 | { |
124 | struct net_device *ndev = priv->dev; |
125 | bool ret = false; |
126 | |
127 | /* MAC core supports the EEE feature. */ |
128 | if (priv->hw_cap.eee) { |
129 | /* Check if the PHY supports EEE */ |
		if (phy_init_eee(ndev->phydev, true))
131 | return false; |
132 | |
133 | priv->eee_active = 1; |
134 | timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0); |
135 | priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); |
		add_timer(&priv->eee_ctrl_timer);
137 | |
138 | priv->hw->mac->set_eee_timer(priv->ioaddr, |
139 | SXGBE_DEFAULT_LPI_TIMER, |
140 | priv->tx_lpi_timer); |
141 | |
142 | pr_info("Energy-Efficient Ethernet initialized\n" ); |
143 | |
144 | ret = true; |
145 | } |
146 | |
147 | return ret; |
148 | } |
149 | |
150 | static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) |
151 | { |
152 | struct net_device *ndev = priv->dev; |
153 | |
	/* When EEE has already been initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
158 | if (priv->eee_enabled) |
159 | priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link); |
160 | } |
161 | |
162 | /** |
163 | * sxgbe_clk_csr_set - dynamically set the MDC clock |
164 | * @priv: driver private structure |
165 | * Description: this is to dynamically set the MDC clock according to the csr |
166 | * clock input. |
167 | */ |
168 | static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv) |
169 | { |
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
171 | |
172 | /* assign the proper divider, this will be used during |
173 | * mdio communication |
174 | */ |
175 | if (clk_rate < SXGBE_CSR_F_150M) |
176 | priv->clk_csr = SXGBE_CSR_100_150M; |
177 | else if (clk_rate <= SXGBE_CSR_F_250M) |
178 | priv->clk_csr = SXGBE_CSR_150_250M; |
179 | else if (clk_rate <= SXGBE_CSR_F_300M) |
180 | priv->clk_csr = SXGBE_CSR_250_300M; |
181 | else if (clk_rate <= SXGBE_CSR_F_350M) |
182 | priv->clk_csr = SXGBE_CSR_300_350M; |
183 | else if (clk_rate <= SXGBE_CSR_F_400M) |
184 | priv->clk_csr = SXGBE_CSR_350_400M; |
185 | else if (clk_rate <= SXGBE_CSR_F_500M) |
186 | priv->clk_csr = SXGBE_CSR_400_500M; |
187 | } |
188 | |
189 | /* minimum number of free TX descriptors required to wake up TX process */ |
#define SXGBE_TX_THRESH(x)	((x)->dma_tx_size / 4)
191 | |
192 | static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize) |
193 | { |
194 | return queue->dirty_tx + tx_qsize - queue->cur_tx - 1; |
195 | } |
196 | |
197 | /** |
198 | * sxgbe_adjust_link |
199 | * @dev: net device structure |
200 | * Description: it adjusts the link parameters. |
201 | */ |
202 | static void sxgbe_adjust_link(struct net_device *dev) |
203 | { |
204 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
205 | struct phy_device *phydev = dev->phydev; |
206 | u8 new_state = 0; |
207 | u8 speed = 0xff; |
208 | |
209 | if (!phydev) |
210 | return; |
211 | |
	/* SXGBE does not support auto-negotiation or half-duplex
	 * mode, so duplex changes are not handled in this function;
	 * only speed and link status are.
	 */
216 | if (phydev->link) { |
217 | if (phydev->speed != priv->speed) { |
218 | new_state = 1; |
219 | switch (phydev->speed) { |
220 | case SPEED_10000: |
221 | speed = SXGBE_SPEED_10G; |
222 | break; |
223 | case SPEED_2500: |
224 | speed = SXGBE_SPEED_2_5G; |
225 | break; |
226 | case SPEED_1000: |
227 | speed = SXGBE_SPEED_1G; |
228 | break; |
229 | default: |
230 | netif_err(priv, link, dev, |
231 | "Speed (%d) not supported\n" , |
232 | phydev->speed); |
233 | } |
234 | |
235 | priv->speed = phydev->speed; |
236 | priv->hw->mac->set_speed(priv->ioaddr, speed); |
237 | } |
238 | |
239 | if (!priv->oldlink) { |
240 | new_state = 1; |
241 | priv->oldlink = 1; |
242 | } |
243 | } else if (priv->oldlink) { |
244 | new_state = 1; |
245 | priv->oldlink = 0; |
246 | priv->speed = SPEED_UNKNOWN; |
247 | } |
248 | |
	if (new_state && netif_msg_link(priv))
250 | phy_print_status(phydev); |
251 | |
252 | /* Alter the MAC settings for EEE */ |
253 | sxgbe_eee_adjust(priv); |
254 | } |
255 | |
256 | /** |
257 | * sxgbe_init_phy - PHY initialization |
258 | * @ndev: net device structure |
259 | * Description: it initializes the driver's PHY state, and attaches the PHY |
260 | * to the mac driver. |
261 | * Return value: |
262 | * 0 on success |
263 | */ |
264 | static int sxgbe_init_phy(struct net_device *ndev) |
265 | { |
266 | char phy_id_fmt[MII_BUS_ID_SIZE + 3]; |
267 | char bus_id[MII_BUS_ID_SIZE]; |
268 | struct phy_device *phydev; |
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
270 | int phy_iface = priv->plat->interface; |
271 | |
272 | /* assign default link status */ |
273 | priv->oldlink = 0; |
274 | priv->speed = SPEED_UNKNOWN; |
275 | priv->oldduplex = DUPLEX_UNKNOWN; |
276 | |
	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
287 | |
	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
293 | } |
294 | |
	/* Stop advertising 1000BASE capability if interface is not GMII */
296 | if ((phy_iface == PHY_INTERFACE_MODE_MII) || |
297 | (phy_iface == PHY_INTERFACE_MODE_RMII)) |
298 | phy_set_max_speed(phydev, SPEED_1000); |
299 | |
300 | if (phydev->phy_id == 0) { |
301 | phy_disconnect(phydev); |
302 | return -ENODEV; |
303 | } |
304 | |
305 | netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n" , |
306 | __func__, phydev->phy_id, phydev->link); |
307 | |
308 | return 0; |
309 | } |
310 | |
311 | /** |
312 | * sxgbe_clear_descriptors: clear descriptors |
313 | * @priv: driver private structure |
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
316 | */ |
317 | static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv) |
318 | { |
319 | int i, j; |
320 | unsigned int txsize = priv->dma_tx_size; |
321 | unsigned int rxsize = priv->dma_rx_size; |
322 | |
323 | /* Clear the Rx/Tx descriptors */ |
324 | for (j = 0; j < SXGBE_RX_QUEUES; j++) { |
325 | for (i = 0; i < rxsize; i++) |
326 | priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], |
327 | priv->use_riwt, priv->mode, |
328 | (i == rxsize - 1)); |
329 | } |
330 | |
331 | for (j = 0; j < SXGBE_TX_QUEUES; j++) { |
332 | for (i = 0; i < txsize; i++) |
333 | priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); |
334 | } |
335 | } |
336 | |
337 | static int sxgbe_init_rx_buffers(struct net_device *dev, |
338 | struct sxgbe_rx_norm_desc *p, int i, |
339 | unsigned int dma_buf_sz, |
340 | struct sxgbe_rx_queue *rx_ring) |
341 | { |
342 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
343 | struct sk_buff *skb; |
344 | |
	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
346 | if (!skb) |
347 | return -ENOMEM; |
348 | |
349 | rx_ring->rx_skbuff[i] = skb; |
350 | rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
351 | dma_buf_sz, DMA_FROM_DEVICE); |
352 | |
	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
355 | dev_kfree_skb_any(skb); |
356 | return -EINVAL; |
357 | } |
358 | |
359 | p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i]; |
360 | |
361 | return 0; |
362 | } |
363 | |
364 | /** |
365 | * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated |
366 | * @dev: net device structure |
 * @p: descriptor pointer
368 | * @i: index |
369 | * @dma_buf_sz: size |
370 | * @rx_ring: ring to be freed |
371 | * |
 * Description: this function frees the RX buffer and its DMA mapping
373 | */ |
374 | static void sxgbe_free_rx_buffers(struct net_device *dev, |
375 | struct sxgbe_rx_norm_desc *p, int i, |
376 | unsigned int dma_buf_sz, |
377 | struct sxgbe_rx_queue *rx_ring) |
378 | { |
379 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
380 | |
	kfree_skb(rx_ring->rx_skbuff[i]);
382 | dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], |
383 | dma_buf_sz, DMA_FROM_DEVICE); |
384 | } |
385 | |
386 | /** |
387 | * init_tx_ring - init the TX descriptor ring |
388 | * @dev: net device structure |
389 | * @queue_no: queue |
390 | * @tx_ring: ring to be initialised |
391 | * @tx_rsize: ring size |
392 | * Description: this function initializes the DMA TX descriptor |
393 | */ |
394 | static int init_tx_ring(struct device *dev, u8 queue_no, |
395 | struct sxgbe_tx_queue *tx_ring, int tx_rsize) |
396 | { |
	/* TX ring was not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
400 | return -ENOMEM; |
401 | } |
402 | |
403 | /* allocate memory for TX descriptors */ |
	tx_ring->dma_tx = dma_alloc_coherent(dev,
					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					     &tx_ring->dma_tx_phy, GFP_KERNEL);
407 | if (!tx_ring->dma_tx) |
408 | return -ENOMEM; |
409 | |
410 | /* allocate memory for TX skbuff array */ |
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
413 | if (!tx_ring->tx_skbuff_dma) |
414 | goto dmamem_err; |
415 | |
	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);
418 | |
419 | if (!tx_ring->tx_skbuff) |
420 | goto dmamem_err; |
421 | |
422 | /* assign queue number */ |
423 | tx_ring->queue_no = queue_no; |
424 | |
425 | /* initialise counters */ |
426 | tx_ring->dirty_tx = 0; |
427 | tx_ring->cur_tx = 0; |
428 | |
429 | return 0; |
430 | |
431 | dmamem_err: |
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
434 | return -ENOMEM; |
435 | } |
436 | |
437 | /** |
438 | * free_rx_ring - free the RX descriptor ring |
439 | * @dev: net device structure |
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptor ring
443 | */ |
444 | static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, |
445 | int rx_rsize) |
446 | { |
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
451 | } |
452 | |
453 | /** |
454 | * init_rx_ring - init the RX descriptor ring |
455 | * @dev: net device structure |
456 | * @queue_no: queue |
457 | * @rx_ring: ring to be initialised |
458 | * @rx_rsize: ring size |
459 | * Description: this function initializes the DMA RX descriptor |
460 | */ |
461 | static int init_rx_ring(struct net_device *dev, u8 queue_no, |
462 | struct sxgbe_rx_queue *rx_ring, int rx_rsize) |
463 | { |
464 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
465 | int desc_index; |
466 | unsigned int bfsize = 0; |
467 | unsigned int ret = 0; |
468 | |
469 | /* Set the max buffer size according to the MTU. */ |
470 | bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); |
471 | |
	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
473 | |
	/* RX ring was not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
477 | return -ENOMEM; |
478 | } |
479 | |
480 | /* assign queue number */ |
481 | rx_ring->queue_no = queue_no; |
482 | |
483 | /* allocate memory for RX descriptors */ |
	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					     &rx_ring->dma_rx_phy, GFP_KERNEL);
487 | |
488 | if (rx_ring->dma_rx == NULL) |
489 | return -ENOMEM; |
490 | |
491 | /* allocate memory for RX skbuff array */ |
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
494 | if (!rx_ring->rx_skbuff_dma) { |
495 | ret = -ENOMEM; |
496 | goto err_free_dma_rx; |
497 | } |
498 | |
	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
501 | if (!rx_ring->rx_skbuff) { |
502 | ret = -ENOMEM; |
503 | goto err_free_skbuff_dma; |
504 | } |
505 | |
506 | /* initialise the buffers */ |
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
512 | if (ret) |
513 | goto err_free_rx_buffers; |
514 | } |
515 | |
	/* initialise counters; desc_index equals rx_rsize here on
	 * success, so dirty_rx wraps around to zero
	 */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
519 | priv->dma_buf_sz = bfsize; |
520 | |
521 | return 0; |
522 | |
523 | err_free_rx_buffers: |
524 | while (--desc_index >= 0) { |
525 | struct sxgbe_rx_norm_desc *p; |
526 | |
527 | p = rx_ring->dma_rx + desc_index; |
		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
529 | } |
	kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
	kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
537 | |
538 | return ret; |
539 | } |
540 | /** |
541 | * free_tx_ring - free the TX descriptor ring |
542 | * @dev: net device structure |
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptor ring
546 | */ |
547 | static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring, |
548 | int tx_rsize) |
549 | { |
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
552 | } |
553 | |
554 | /** |
555 | * init_dma_desc_rings - init the RX/TX descriptor rings |
556 | * @netd: net device structure |
557 | * Description: this function initializes the DMA RX/TX descriptors |
 * and allocates the socket buffers. It supports the chained and ring
559 | * modes. |
560 | */ |
561 | static int init_dma_desc_rings(struct net_device *netd) |
562 | { |
563 | int queue_num, ret; |
	struct sxgbe_priv_data *priv = netdev_priv(netd);
565 | int tx_rsize = priv->dma_tx_size; |
566 | int rx_rsize = priv->dma_rx_size; |
567 | |
568 | /* Allocate memory for queue structures and TX descs */ |
569 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
574 | goto txalloc_err; |
575 | } |
576 | |
		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the TX queue
		 */
580 | priv->txq[queue_num]->priv_ptr = priv; |
581 | } |
582 | |
583 | /* Allocate memory for queue structures and RX descs */ |
584 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!!\n");
589 | goto rxalloc_err; |
590 | } |
591 | |
		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the RX queue
		 */
595 | priv->rxq[queue_num]->priv_ptr = priv; |
596 | } |
597 | |
598 | sxgbe_clear_descriptors(priv); |
599 | |
600 | return 0; |
601 | |
602 | txalloc_err: |
603 | while (queue_num--) |
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
605 | return ret; |
606 | |
607 | rxalloc_err: |
608 | while (queue_num--) |
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
610 | return ret; |
611 | } |
612 | |
613 | static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue) |
614 | { |
615 | int dma_desc; |
616 | struct sxgbe_priv_data *priv = txqueue->priv_ptr; |
617 | int tx_rsize = priv->dma_tx_size; |
618 | |
619 | for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) { |
620 | struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc; |
621 | |
622 | if (txqueue->tx_skbuff_dma[dma_desc]) |
623 | dma_unmap_single(priv->device, |
624 | txqueue->tx_skbuff_dma[dma_desc], |
625 | priv->hw->desc->get_tx_len(tdesc), |
626 | DMA_TO_DEVICE); |
627 | |
		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
629 | txqueue->tx_skbuff[dma_desc] = NULL; |
630 | txqueue->tx_skbuff_dma[dma_desc] = 0; |
631 | } |
632 | } |
633 | |
634 | |
635 | static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv) |
636 | { |
637 | int queue_num; |
638 | |
639 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		tx_free_ring_skbufs(tqueue);
642 | } |
643 | } |
644 | |
645 | static void free_dma_desc_resources(struct sxgbe_priv_data *priv) |
646 | { |
647 | int queue_num; |
648 | int tx_rsize = priv->dma_tx_size; |
649 | int rx_rsize = priv->dma_rx_size; |
650 | |
651 | /* Release the DMA TX buffers */ |
652 | dma_free_tx_skbufs(priv); |
653 | |
654 | /* Release the TX ring memory also */ |
655 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
657 | } |
658 | |
659 | /* Release the RX ring memory also */ |
660 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
662 | } |
663 | } |
664 | |
665 | static int txring_mem_alloc(struct sxgbe_priv_data *priv) |
666 | { |
667 | int queue_num; |
668 | |
669 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
672 | if (!priv->txq[queue_num]) |
673 | return -ENOMEM; |
674 | } |
675 | |
676 | return 0; |
677 | } |
678 | |
679 | static int rxring_mem_alloc(struct sxgbe_priv_data *priv) |
680 | { |
681 | int queue_num; |
682 | |
683 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
686 | if (!priv->rxq[queue_num]) |
687 | return -ENOMEM; |
688 | } |
689 | |
690 | return 0; |
691 | } |
692 | |
693 | /** |
694 | * sxgbe_mtl_operation_mode - HW MTL operation mode |
695 | * @priv: driver private structure |
696 | * Description: it sets the MTL operation mode: tx/rx MTL thresholds |
697 | * or Store-And-Forward capability. |
698 | */ |
699 | static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv) |
700 | { |
701 | int queue_num; |
702 | |
703 | /* TX/RX threshold control */ |
704 | if (likely(priv->plat->force_sf_dma_mode)) { |
705 | /* set TC mode for TX QUEUES */ |
706 | SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) |
707 | priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, |
708 | SXGBE_MTL_SFMODE); |
709 | priv->tx_tc = SXGBE_MTL_SFMODE; |
710 | |
711 | /* set TC mode for RX QUEUES */ |
712 | SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) |
713 | priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, |
714 | SXGBE_MTL_SFMODE); |
715 | priv->rx_tc = SXGBE_MTL_SFMODE; |
716 | } else if (unlikely(priv->plat->force_thresh_dma_mode)) { |
717 | /* set TC mode for TX QUEUES */ |
718 | SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) |
719 | priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, |
720 | priv->tx_tc); |
721 | /* set TC mode for RX QUEUES */ |
722 | SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) |
723 | priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, |
724 | priv->rx_tc); |
725 | } else { |
726 | pr_err("ERROR: %s: Invalid TX threshold mode\n" , __func__); |
727 | } |
728 | } |
729 | |
730 | /** |
731 | * sxgbe_tx_queue_clean: |
732 | * @tqueue: queue pointer |
733 | * Description: it reclaims resources after transmission completes. |
734 | */ |
735 | static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue) |
736 | { |
737 | struct sxgbe_priv_data *priv = tqueue->priv_ptr; |
738 | unsigned int tx_rsize = priv->dma_tx_size; |
739 | struct netdev_queue *dev_txq; |
740 | u8 queue_no = tqueue->queue_no; |
741 | |
	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
743 | |
	__netif_tx_lock(dev_txq, smp_processor_id());
745 | |
746 | priv->xstats.tx_clean++; |
747 | while (tqueue->dirty_tx != tqueue->cur_tx) { |
748 | unsigned int entry = tqueue->dirty_tx % tx_rsize; |
749 | struct sk_buff *skb = tqueue->tx_skbuff[entry]; |
750 | struct sxgbe_tx_norm_desc *p; |
751 | |
752 | p = tqueue->dma_tx + entry; |
753 | |
754 | /* Check if the descriptor is owned by the DMA. */ |
755 | if (priv->hw->desc->get_tx_owner(p)) |
756 | break; |
757 | |
758 | if (netif_msg_tx_done(priv)) |
759 | pr_debug("%s: curr %d, dirty %d\n" , |
760 | __func__, tqueue->cur_tx, tqueue->dirty_tx); |
761 | |
762 | if (likely(tqueue->tx_skbuff_dma[entry])) { |
763 | dma_unmap_single(priv->device, |
764 | tqueue->tx_skbuff_dma[entry], |
765 | priv->hw->desc->get_tx_len(p), |
766 | DMA_TO_DEVICE); |
767 | tqueue->tx_skbuff_dma[entry] = 0; |
768 | } |
769 | |
770 | if (likely(skb)) { |
771 | dev_kfree_skb(skb); |
772 | tqueue->tx_skbuff[entry] = NULL; |
773 | } |
774 | |
775 | priv->hw->desc->release_tx_desc(p); |
776 | |
777 | tqueue->dirty_tx++; |
778 | } |
779 | |
780 | /* wake up queue */ |
781 | if (unlikely(netif_tx_queue_stopped(dev_txq) && |
782 | sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { |
783 | if (netif_msg_tx_done(priv)) |
784 | pr_debug("%s: restart transmit\n" , __func__); |
785 | netif_tx_wake_queue(dev_queue: dev_txq); |
786 | } |
787 | |
	__netif_tx_unlock(dev_txq);
789 | } |
790 | |
791 | /** |
792 | * sxgbe_tx_all_clean: |
793 | * @priv: driver private structure |
794 | * Description: it reclaims resources after transmission completes. |
795 | */ |
796 | static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv) |
797 | { |
798 | u8 queue_num; |
799 | |
800 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
801 | struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; |
802 | |
803 | sxgbe_tx_queue_clean(tqueue); |
804 | } |
805 | |
806 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { |
807 | sxgbe_enable_eee_mode(priv); |
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
809 | } |
810 | } |
811 | |
812 | /** |
813 | * sxgbe_restart_tx_queue: irq tx error mng function |
814 | * @priv: driver private structure |
815 | * @queue_num: queue number |
816 | * Description: it cleans the descriptors and restarts the transmission |
817 | * in case of errors. |
818 | */ |
819 | static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) |
820 | { |
821 | struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; |
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);
824 | |
825 | /* stop the queue */ |
	netif_tx_stop_queue(dev_txq);
827 | |
828 | /* stop the tx dma */ |
829 | priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); |
830 | |
831 | /* free the skbuffs of the ring */ |
	tx_free_ring_skbufs(tx_ring);
833 | |
834 | /* initialise counters */ |
835 | tx_ring->cur_tx = 0; |
836 | tx_ring->dirty_tx = 0; |
837 | |
838 | /* start the tx dma */ |
839 | priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); |
840 | |
841 | priv->dev->stats.tx_errors++; |
842 | |
843 | /* wakeup the queue */ |
	netif_tx_wake_queue(dev_txq);
845 | } |
846 | |
847 | /** |
848 | * sxgbe_reset_all_tx_queues: irq tx error mng function |
849 | * @priv: driver private structure |
850 | * Description: it cleans all the descriptors and |
851 | * restarts the transmission on all queues in case of errors. |
852 | */ |
853 | static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv) |
854 | { |
855 | int queue_num; |
856 | |
857 | /* On TX timeout of net device, resetting of all queues |
858 | * may not be proper way, revisit this later if needed |
859 | */ |
860 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) |
861 | sxgbe_restart_tx_queue(priv, queue_num); |
862 | } |
863 | |
864 | /** |
865 | * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register. |
866 | * @priv: driver private structure |
867 | * Description: |
 * newer chip generations have a register set that indicates the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform code, as was necessary for older MAC10/100 and GMAC chips.
872 | */ |
873 | static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv) |
874 | { |
875 | int rval = 0; |
876 | struct sxgbe_hw_features *features = &priv->hw_cap; |
877 | |
878 | /* Read First Capability Register CAP[0] */ |
879 | rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); |
880 | if (rval) { |
881 | features->pmt_remote_wake_up = |
882 | SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval); |
883 | features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval); |
884 | features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval); |
885 | features->tx_csum_offload = |
886 | SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval); |
887 | features->rx_csum_offload = |
888 | SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval); |
889 | features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval); |
890 | features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval); |
891 | features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval); |
892 | features->eee = SXGBE_HW_FEAT_EEE(rval); |
893 | } |
894 | |
	/* Read Second Capability Register CAP[1] */
896 | rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); |
897 | if (rval) { |
898 | features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval); |
899 | features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); |
900 | features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); |
901 | features->dcb_enable = SXGBE_HW_FEAT_DCB(rval); |
902 | features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval); |
903 | features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval); |
904 | features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval); |
905 | features->rss_enable = SXGBE_HW_FEAT_RSS(rval); |
906 | features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval); |
907 | features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval); |
908 | } |
909 | |
	/* Read Third Capability Register CAP[2] */
911 | rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); |
912 | if (rval) { |
913 | features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval); |
914 | features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval); |
915 | features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval); |
916 | features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval); |
917 | features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval); |
918 | features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval); |
919 | } |
920 | |
921 | return rval; |
922 | } |
923 | |
924 | /** |
925 | * sxgbe_check_ether_addr: check if the MAC addr is valid |
926 | * @priv: driver private structure |
927 | * Description: |
 * it verifies if the MAC address is valid. If not, the address stored in
 * the hardware is tried, and a random MAC address is generated as a last
 * resort
930 | */ |
931 | static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv) |
932 | { |
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
934 | u8 addr[ETH_ALEN]; |
935 | |
936 | priv->hw->mac->get_umac_addr((void __iomem *) |
937 | priv->ioaddr, addr, 0); |
		if (is_valid_ether_addr(addr))
			eth_hw_addr_set(priv->dev, addr);
		else
			eth_hw_addr_random(priv->dev);
942 | } |
943 | dev_info(priv->device, "device MAC address %pM\n" , |
944 | priv->dev->dev_addr); |
945 | } |
946 | |
947 | /** |
948 | * sxgbe_init_dma_engine: DMA init. |
949 | * @priv: driver private structure |
950 | * Description: |
951 | * It inits the DMA invoking the specific SXGBE callback. |
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is used for the MAC.
954 | */ |
955 | static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv) |
956 | { |
957 | int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0; |
958 | int queue_num; |
959 | |
960 | if (priv->plat->dma_cfg) { |
961 | pbl = priv->plat->dma_cfg->pbl; |
962 | fixed_burst = priv->plat->dma_cfg->fixed_burst; |
963 | burst_map = priv->plat->dma_cfg->burst_map; |
964 | } |
965 | |
966 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) |
967 | priv->hw->dma->cha_init(priv->ioaddr, queue_num, |
968 | fixed_burst, pbl, |
969 | (priv->txq[queue_num])->dma_tx_phy, |
970 | (priv->rxq[queue_num])->dma_rx_phy, |
971 | priv->dma_tx_size, priv->dma_rx_size); |
972 | |
973 | return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); |
974 | } |
975 | |
976 | /** |
977 | * sxgbe_init_mtl_engine: MTL init. |
978 | * @priv: driver private structure |
979 | * Description: |
980 | * It inits the MTL invoking the specific SXGBE callback. |
981 | */ |
982 | static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv) |
983 | { |
984 | int queue_num; |
985 | |
986 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
987 | priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, |
988 | priv->hw_cap.tx_mtl_qsize); |
989 | priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); |
990 | } |
991 | } |
992 | |
993 | /** |
994 | * sxgbe_disable_mtl_engine: MTL disable. |
995 | * @priv: driver private structure |
996 | * Description: |
997 | * It disables the MTL queues by invoking the specific SXGBE callback. |
998 | */ |
999 | static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv) |
1000 | { |
1001 | int queue_num; |
1002 | |
1003 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) |
1004 | priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); |
1005 | } |
1006 | |
1007 | |
1008 | /** |
1009 | * sxgbe_tx_timer: mitigation sw timer for tx. |
1010 | * @t: timer pointer |
1011 | * Description: |
1012 | * This is the timer handler to directly invoke the sxgbe_tx_clean. |
1013 | */ |
1014 | static void sxgbe_tx_timer(struct timer_list *t) |
1015 | { |
	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);

	sxgbe_tx_queue_clean(p);
1018 | } |
1019 | |
1020 | /** |
1021 | * sxgbe_tx_init_coalesce: init tx mitigation options. |
1022 | * @priv: driver private structure |
1023 | * Description: |
1024 | * This inits the transmit coalesce parameters: i.e. timer rate, |
1025 | * timer handler and default threshold used for enabling the |
1026 | * interrupt on completion bit. |
1027 | */ |
1028 | static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv) |
1029 | { |
1030 | u8 queue_num; |
1031 | |
1032 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		p->tx_coal_frames = SXGBE_TX_FRAMES;
1035 | p->tx_coal_timer = SXGBE_COAL_TX_TIMER; |
1036 | timer_setup(&p->txtimer, sxgbe_tx_timer, 0); |
1037 | p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); |
		add_timer(&p->txtimer);
1039 | } |
1040 | } |
1041 | |
1042 | static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv) |
1043 | { |
1044 | u8 queue_num; |
1045 | |
1046 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		del_timer_sync(&p->txtimer);
1049 | } |
1050 | } |
1051 | |
1052 | /** |
1053 | * sxgbe_open - open entry point of the driver |
1054 | * @dev : pointer to the device structure. |
1055 | * Description: |
1056 | * This function is the open entry point of the driver. |
1057 | * Return value: |
1058 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
1059 | * file on failure. |
1060 | */ |
1061 | static int sxgbe_open(struct net_device *dev) |
1062 | { |
1063 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1064 | int ret, queue_num; |
1065 | |
	clk_prepare_enable(priv->sxgbe_clk);
1067 | |
1068 | sxgbe_check_ether_addr(priv); |
1069 | |
1070 | /* Init the phy */ |
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
1074 | __func__, ret); |
1075 | goto phy_error; |
1076 | } |
1077 | |
1078 | /* Create and initialize the TX/RX descriptors chains. */ |
1079 | priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); |
1080 | priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); |
1081 | priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); |
1082 | priv->tx_tc = TC_DEFAULT; |
1083 | priv->rx_tc = TC_DEFAULT; |
	init_dma_desc_rings(dev);
1085 | |
1086 | /* DMA initialization and SW reset */ |
1087 | ret = sxgbe_init_dma_engine(priv); |
1088 | if (ret < 0) { |
1089 | netdev_err(dev, format: "%s: DMA initialization failed\n" , __func__); |
1090 | goto init_error; |
1091 | } |
1092 | |
1093 | /* MTL initialization */ |
1094 | sxgbe_init_mtl_engine(priv); |
1095 | |
1096 | /* Copy the MAC addr into the HW */ |
1097 | priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); |
1098 | |
1099 | /* Initialize the MAC Core */ |
1100 | priv->hw->mac->core_init(priv->ioaddr); |
1101 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
1102 | priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); |
1103 | } |
1104 | |
1105 | /* Request the IRQ lines */ |
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
1108 | if (unlikely(ret < 0)) { |
1109 | netdev_err(dev, format: "%s: ERROR: allocating the IRQ %d (error: %d)\n" , |
1110 | __func__, priv->irq, ret); |
1111 | goto init_error; |
1112 | } |
1113 | |
	/* If the LPI irq is different from the mac irq,
	 * register a dedicated handler
1116 | */ |
1117 | if (priv->lpi_irq != dev->irq) { |
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1123 | __func__, priv->lpi_irq, ret); |
1124 | goto init_error; |
1125 | } |
1126 | } |
1127 | |
1128 | /* Request TX DMA irq lines */ |
1129 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1136 | __func__, priv->irq, ret); |
1137 | goto init_error; |
1138 | } |
1139 | } |
1140 | |
1141 | /* Request RX DMA irq lines */ |
1142 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
1149 | __func__, priv->irq, ret); |
1150 | goto init_error; |
1151 | } |
1152 | } |
1153 | |
1154 | /* Enable the MAC Rx/Tx */ |
1155 | priv->hw->mac->enable_tx(priv->ioaddr, true); |
1156 | priv->hw->mac->enable_rx(priv->ioaddr, true); |
1157 | |
1158 | /* Set the HW DMA mode and the COE */ |
1159 | sxgbe_mtl_operation_mode(priv); |
1160 | |
1161 | /* Extra statistics */ |
1162 | memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); |
1163 | |
1164 | priv->xstats.tx_threshold = priv->tx_tc; |
1165 | priv->xstats.rx_threshold = priv->rx_tc; |
1166 | |
1167 | /* Start the ball rolling... */ |
1168 | netdev_dbg(dev, "DMA RX/TX processes started...\n" ); |
1169 | priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); |
1170 | priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); |
1171 | |
1172 | if (dev->phydev) |
		phy_start(dev->phydev);
1174 | |
1175 | /* initialise TX coalesce parameters */ |
1176 | sxgbe_tx_init_coalesce(priv); |
1177 | |
1178 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { |
1179 | priv->rx_riwt = SXGBE_MAX_DMA_RIWT; |
1180 | priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); |
1181 | } |
1182 | |
1183 | priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; |
1184 | priv->eee_enabled = sxgbe_eee_init(priv); |
1185 | |
	napi_enable(&priv->napi);
1187 | netif_start_queue(dev); |
1188 | |
1189 | return 0; |
1190 | |
1191 | init_error: |
1192 | free_dma_desc_resources(priv); |
1193 | if (dev->phydev) |
		phy_disconnect(dev->phydev);
1195 | phy_error: |
	clk_disable_unprepare(priv->sxgbe_clk);
1197 | |
1198 | return ret; |
1199 | } |
1200 | |
1201 | /** |
1202 | * sxgbe_release - close entry point of the driver |
1203 | * @dev : device pointer. |
1204 | * Description: |
1205 | * This is the stop entry point of the driver. |
1206 | */ |
1207 | static int sxgbe_release(struct net_device *dev) |
1208 | { |
1209 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1210 | |
1211 | if (priv->eee_enabled) |
		del_timer_sync(&priv->eee_ctrl_timer);
1213 | |
1214 | /* Stop and disconnect the PHY */ |
1215 | if (dev->phydev) { |
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
1218 | } |
1219 | |
1220 | netif_tx_stop_all_queues(dev); |
1221 | |
	napi_disable(&priv->napi);
1223 | |
1224 | /* delete TX timers */ |
1225 | sxgbe_tx_del_timer(priv); |
1226 | |
1227 | /* Stop TX/RX DMA and clear the descriptors */ |
1228 | priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); |
1229 | priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); |
1230 | |
1231 | /* disable MTL queue */ |
1232 | sxgbe_disable_mtl_engine(priv); |
1233 | |
1234 | /* Release and free the Rx/Tx resources */ |
1235 | free_dma_desc_resources(priv); |
1236 | |
1237 | /* Disable the MAC Rx/Tx */ |
1238 | priv->hw->mac->enable_tx(priv->ioaddr, false); |
1239 | priv->hw->mac->enable_rx(priv->ioaddr, false); |
1240 | |
	clk_disable_unprepare(priv->sxgbe_clk);
1242 | |
1243 | return 0; |
1244 | } |
1245 | /* Prepare first Tx descriptor for doing TSO operation */ |
1246 | static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, |
1247 | struct sxgbe_tx_norm_desc *first_desc, |
1248 | struct sk_buff *skb) |
1249 | { |
1250 | unsigned int total_hdr_len, tcp_hdr_len; |
1251 | |
1252 | /* Write first Tx descriptor with appropriate value */ |
1253 | tcp_hdr_len = tcp_hdrlen(skb); |
1254 | total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len; |
1255 | |
1256 | first_desc->tdes01 = dma_map_single(priv->device, skb->data, |
1257 | total_hdr_len, DMA_TO_DEVICE); |
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);
1260 | |
1261 | first_desc->tdes23.tx_rd_des23.first_desc = 1; |
1262 | priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, |
1263 | tcp_hdr_len, |
1264 | skb->len - total_hdr_len); |
1265 | } |
1266 | |
1267 | /** |
1268 | * sxgbe_xmit: Tx entry point of the driver |
1269 | * @skb : the socket buffer |
1270 | * @dev : device pointer |
1271 | * Description : this is the tx entry point of the driver. |
 * It programs the chain or the ring and supports oversized frames
 * and the SG feature.
1274 | */ |
1275 | static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev) |
1276 | { |
1277 | unsigned int entry, frag_num; |
1278 | int cksum_flag = 0; |
1279 | struct netdev_queue *dev_txq; |
1280 | unsigned txq_index = skb_get_queue_mapping(skb); |
1281 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1282 | unsigned int tx_rsize = priv->dma_tx_size; |
1283 | struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; |
1284 | struct sxgbe_tx_norm_desc *tx_desc, *first_desc; |
1285 | struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL; |
1286 | int nr_frags = skb_shinfo(skb)->nr_frags; |
1287 | int no_pagedlen = skb_headlen(skb); |
1288 | int is_jumbo = 0; |
1289 | u16 cur_mss = skb_shinfo(skb)->gso_size; |
1290 | u32 ctxt_desc_req = 0; |
1291 | |
1292 | /* get the TX queue handle */ |
	dev_txq = netdev_get_tx_queue(dev, txq_index);
1294 | |
1295 | if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) |
1296 | ctxt_desc_req = 1; |
1297 | |
1298 | if (unlikely(skb_vlan_tag_present(skb) || |
1299 | ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
1300 | tqueue->hwts_tx_en))) |
1301 | ctxt_desc_req = 1; |
1302 | |
1303 | if (priv->tx_path_in_lpi_mode) |
1304 | sxgbe_disable_eee_mode(priv); |
1305 | |
1306 | if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) { |
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
1310 | __func__, txq_index); |
1311 | } |
1312 | return NETDEV_TX_BUSY; |
1313 | } |
1314 | |
1315 | entry = tqueue->cur_tx % tx_rsize; |
1316 | tx_desc = tqueue->dma_tx + entry; |
1317 | |
1318 | first_desc = tx_desc; |
1319 | if (ctxt_desc_req) |
1320 | ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc; |
1321 | |
1322 | /* save the skb address */ |
1323 | tqueue->tx_skbuff[entry] = skb; |
1324 | |
1325 | if (!is_jumbo) { |
1326 | if (likely(skb_is_gso(skb))) { |
1327 | /* TSO support */ |
1328 | if (unlikely(tqueue->prev_mss != cur_mss)) { |
1329 | priv->hw->desc->tx_ctxt_desc_set_mss( |
1330 | ctxt_desc, cur_mss); |
1331 | priv->hw->desc->tx_ctxt_desc_set_tcmssv( |
1332 | ctxt_desc); |
1333 | priv->hw->desc->tx_ctxt_desc_reset_ostc( |
1334 | ctxt_desc); |
1335 | priv->hw->desc->tx_ctxt_desc_set_ctxt( |
1336 | ctxt_desc); |
1337 | priv->hw->desc->tx_ctxt_desc_set_owner( |
1338 | ctxt_desc); |
1339 | |
1340 | entry = (++tqueue->cur_tx) % tx_rsize; |
1341 | first_desc = tqueue->dma_tx + entry; |
1342 | |
1343 | tqueue->prev_mss = cur_mss; |
1344 | } |
1345 | sxgbe_tso_prepare(priv, first_desc, skb); |
1346 | } else { |
1347 | tx_desc->tdes01 = dma_map_single(priv->device, |
1348 | skb->data, no_pagedlen, DMA_TO_DEVICE); |
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);
1352 | |
1353 | priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, |
1354 | no_pagedlen, cksum_flag); |
1355 | } |
1356 | } |
1357 | |
1358 | for (frag_num = 0; frag_num < nr_frags; frag_num++) { |
1359 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; |
1360 | int len = skb_frag_size(frag); |
1361 | |
1362 | entry = (++tqueue->cur_tx) % tx_rsize; |
1363 | tx_desc = tqueue->dma_tx + entry; |
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);
1366 | |
1367 | tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01; |
1368 | tqueue->tx_skbuff[entry] = NULL; |
1369 | |
1370 | /* prepare the descriptor */ |
1371 | priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, |
1372 | len, cksum_flag); |
1373 | /* memory barrier to flush descriptor */ |
1374 | wmb(); |
1375 | |
1376 | /* set the owner */ |
1377 | priv->hw->desc->set_tx_owner(tx_desc); |
1378 | } |
1379 | |
1380 | /* close the descriptors */ |
1381 | priv->hw->desc->close_tx_desc(tx_desc); |
1382 | |
1383 | /* memory barrier to flush descriptor */ |
1384 | wmb(); |
1385 | |
1386 | tqueue->tx_count_frames += nr_frags + 1; |
1387 | if (tqueue->tx_count_frames > tqueue->tx_coal_frames) { |
1388 | priv->hw->desc->clear_tx_ic(tx_desc); |
1389 | priv->xstats.tx_reset_ic_bit++; |
		mod_timer(&tqueue->txtimer,
1391 | SXGBE_COAL_TIMER(tqueue->tx_coal_timer)); |
1392 | } else { |
1393 | tqueue->tx_count_frames = 0; |
1394 | } |
1395 | |
1396 | /* set owner for first desc */ |
1397 | priv->hw->desc->set_tx_owner(first_desc); |
1398 | |
1399 | /* memory barrier to flush descriptor */ |
1400 | wmb(); |
1401 | |
1402 | tqueue->cur_tx++; |
1403 | |
1404 | /* display current ring */ |
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
1406 | __func__, tqueue->cur_tx % tx_rsize, |
1407 | tqueue->dirty_tx % tx_rsize, entry, |
1408 | first_desc, nr_frags); |
1409 | |
1410 | if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) { |
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
1414 | } |
1415 | |
1416 | dev->stats.tx_bytes += skb->len; |
1417 | |
1418 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
1419 | tqueue->hwts_tx_en)) { |
1420 | /* declare that device is doing timestamping */ |
1421 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1422 | priv->hw->desc->tx_enable_tstamp(first_desc); |
1423 | } |
1424 | |
1425 | skb_tx_timestamp(skb); |
1426 | |
1427 | priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); |
1428 | |
1429 | return NETDEV_TX_OK; |
1430 | } |
1431 | |
1432 | /** |
1433 | * sxgbe_rx_refill: refill used skb preallocated buffers |
1434 | * @priv: driver private structure |
 * Description : this reallocates the skbs for the zero-copy based
 * reception process.
1437 | */ |
1438 | static void sxgbe_rx_refill(struct sxgbe_priv_data *priv) |
1439 | { |
1440 | unsigned int rxsize = priv->dma_rx_size; |
1441 | int bfsize = priv->dma_buf_sz; |
1442 | u8 qnum = priv->cur_rx_qnum; |
1443 | |
1444 | for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; |
1445 | priv->rxq[qnum]->dirty_rx++) { |
1446 | unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; |
1447 | struct sxgbe_rx_norm_desc *p; |
1448 | |
1449 | p = priv->rxq[qnum]->dma_rx + entry; |
1450 | |
1451 | if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { |
1452 | struct sk_buff *skb; |
1453 | |
			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1455 | |
1456 | if (unlikely(skb == NULL)) |
1457 | break; |
1458 | |
1459 | priv->rxq[qnum]->rx_skbuff[entry] = skb; |
1460 | priv->rxq[qnum]->rx_skbuff_dma[entry] = |
1461 | dma_map_single(priv->device, skb->data, bfsize, |
1462 | DMA_FROM_DEVICE); |
1463 | |
1464 | p->rdes23.rx_rd_des23.buf2_addr = |
1465 | priv->rxq[qnum]->rx_skbuff_dma[entry]; |
1466 | } |
1467 | |
1468 | /* Added memory barrier for RX descriptor modification */ |
1469 | wmb(); |
1470 | priv->hw->desc->set_rx_owner(p); |
1471 | priv->hw->desc->set_rx_int_on_com(p); |
1472 | /* Added memory barrier for RX descriptor modification */ |
1473 | wmb(); |
1474 | } |
1475 | } |
1476 | |
1477 | /** |
1478 | * sxgbe_rx: receive the frames from the remote host |
1479 | * @priv: driver private structure |
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
1482 | * It gets all the frames inside the ring. |
1483 | */ |
1484 | static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit) |
1485 | { |
1486 | u8 qnum = priv->cur_rx_qnum; |
1487 | unsigned int rxsize = priv->dma_rx_size; |
1488 | unsigned int entry = priv->rxq[qnum]->cur_rx; |
1489 | unsigned int next_entry = 0; |
1490 | unsigned int count = 0; |
1491 | int checksum; |
1492 | int status; |
1493 | |
1494 | while (count < limit) { |
1495 | struct sxgbe_rx_norm_desc *p; |
1496 | struct sk_buff *skb; |
1497 | int frame_len; |
1498 | |
1499 | p = priv->rxq[qnum]->dma_rx + entry; |
1500 | |
1501 | if (priv->hw->desc->get_rx_owner(p)) |
1502 | break; |
1503 | |
1504 | count++; |
1505 | |
1506 | next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; |
1507 | prefetch(priv->rxq[qnum]->dma_rx + next_entry); |
1508 | |
1509 | /* Read the status of the incoming frame and also get checksum |
1510 | * value based on whether it is enabled in SXGBE hardware or |
1511 | * not. |
1512 | */ |
1513 | status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, |
1514 | &checksum); |
1515 | if (unlikely(status < 0)) { |
1516 | entry = next_entry; |
1517 | continue; |
1518 | } |
1519 | if (unlikely(!priv->rxcsum_insertion)) |
1520 | checksum = CHECKSUM_NONE; |
1521 | |
1522 | skb = priv->rxq[qnum]->rx_skbuff[entry]; |
1523 | |
1524 | if (unlikely(!skb)) |
			netdev_err(priv->dev, "rx descriptor is not consistent\n");
1526 | |
1527 | prefetch(skb->data - NET_IP_ALIGN); |
1528 | priv->rxq[qnum]->rx_skbuff[entry] = NULL; |
1529 | |
1530 | frame_len = priv->hw->desc->get_rx_frame_len(p); |
1531 | |
		skb_put(skb, frame_len);
1533 | |
1534 | skb->ip_summed = checksum; |
1535 | if (checksum == CHECKSUM_NONE) |
1536 | netif_receive_skb(skb); |
1537 | else |
			napi_gro_receive(&priv->napi, skb);
1539 | |
1540 | entry = next_entry; |
1541 | } |
1542 | |
1543 | sxgbe_rx_refill(priv); |
1544 | |
1545 | return count; |
1546 | } |
1547 | |
1548 | /** |
1549 | * sxgbe_poll - sxgbe poll method (NAPI) |
1550 | * @napi : pointer to the napi structure. |
1551 | * @budget : maximum number of packets that the current CPU can receive from |
1552 | * all interfaces. |
1553 | * Description : |
1554 | * To look at the incoming frames and clear the tx resources. |
1555 | */ |
1556 | static int sxgbe_poll(struct napi_struct *napi, int budget) |
1557 | { |
1558 | struct sxgbe_priv_data *priv = container_of(napi, |
1559 | struct sxgbe_priv_data, napi); |
1560 | int work_done = 0; |
1561 | u8 qnum = priv->cur_rx_qnum; |
1562 | |
1563 | priv->xstats.napi_poll++; |
1564 | /* first, clean the tx queues */ |
1565 | sxgbe_tx_all_clean(priv); |
1566 | |
	work_done = sxgbe_rx(priv, budget);
1568 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
1570 | priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); |
1571 | } |
1572 | |
1573 | return work_done; |
1574 | } |
1575 | |
1576 | /** |
1577 | * sxgbe_tx_timeout |
1578 | * @dev : Pointer to net device structure |
1579 | * @txqueue: index of the hanging queue |
1580 | * Description: this function is called when a packet transmission fails to |
1581 | * complete within a reasonable time. The driver will mark the error in the |
1582 | * netdev structure and arrange for the device to be reset to a sane state |
1583 | * in order to transmit a new packet. |
1584 | */ |
1585 | static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue) |
1586 | { |
1587 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1588 | |
1589 | sxgbe_reset_all_tx_queues(priv); |
1590 | } |
1591 | |
1592 | /** |
1593 | * sxgbe_common_interrupt - main ISR |
1594 | * @irq: interrupt number. |
1595 | * @dev_id: to pass the net device pointer. |
1596 | * Description: this is the main driver interrupt service routine. |
1597 | * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI |
1598 | * interrupts. |
1599 | */ |
1600 | static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id) |
1601 | { |
1602 | struct net_device *netdev = (struct net_device *)dev_id; |
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
1604 | int status; |
1605 | |
1606 | status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); |
1607 | /* For LPI we need to save the tx status */ |
1608 | if (status & TX_ENTRY_LPI_MODE) { |
1609 | priv->xstats.tx_lpi_entry_n++; |
1610 | priv->tx_path_in_lpi_mode = true; |
1611 | } |
1612 | if (status & TX_EXIT_LPI_MODE) { |
1613 | priv->xstats.tx_lpi_exit_n++; |
1614 | priv->tx_path_in_lpi_mode = false; |
1615 | } |
1616 | if (status & RX_ENTRY_LPI_MODE) |
1617 | priv->xstats.rx_lpi_entry_n++; |
1618 | if (status & RX_EXIT_LPI_MODE) |
1619 | priv->xstats.rx_lpi_exit_n++; |
1620 | |
1621 | return IRQ_HANDLED; |
1622 | } |
1623 | |
1624 | /** |
1625 | * sxgbe_tx_interrupt - TX DMA ISR |
1626 | * @irq: interrupt number. |
1627 | * @dev_id: to pass the net device pointer. |
1628 | * Description: this is the tx dma interrupt service routine. |
1629 | */ |
1630 | static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id) |
1631 | { |
1632 | int status; |
1633 | struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id; |
1634 | struct sxgbe_priv_data *priv = txq->priv_ptr; |
1635 | |
1636 | /* get the channel status */ |
1637 | status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no, |
1638 | &priv->xstats); |
1639 | /* check for normal path */ |
1640 | if (likely((status & handle_tx))) |
		napi_schedule(&priv->napi);
1642 | |
1643 | /* check for unrecoverable error */ |
1644 | if (unlikely((status & tx_hard_error))) |
		sxgbe_restart_tx_queue(priv, txq->queue_no);
1646 | |
1647 | /* check for TC configuration change */ |
1648 | if (unlikely((status & tx_bump_tc) && |
1649 | (priv->tx_tc != SXGBE_MTL_SFMODE) && |
1650 | (priv->tx_tc < 512))) { |
		/* TX TC steps by 32 up to 128, then by 64 */
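		/* e.g. from the default of 64 the threshold walks
		 * 64 -> 96 -> 128 -> 192 -> 256 -> ... until the
		 * (tx_tc < 512) guard above stops further bumps.
		 */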
1652 | priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64; |
1653 | priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, |
1654 | txq->queue_no, priv->tx_tc); |
1655 | priv->xstats.tx_threshold = priv->tx_tc; |
1656 | } |
1657 | |
1658 | return IRQ_HANDLED; |
1659 | } |
1660 | |
1661 | /** |
1662 | * sxgbe_rx_interrupt - RX DMA ISR |
1663 | * @irq: interrupt number. |
1664 | * @dev_id: to pass the net device pointer. |
1665 | * Description: this is the rx dma interrupt service routine. |
1666 | */ |
1667 | static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id) |
1668 | { |
1669 | int status; |
1670 | struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id; |
1671 | struct sxgbe_priv_data *priv = rxq->priv_ptr; |
1672 | |
1673 | /* get the channel status */ |
1674 | status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, |
1675 | &priv->xstats); |
1676 | |
1677 | if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) { |
1678 | priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); |
		__napi_schedule(&priv->napi);
1680 | } |
1681 | |
1682 | /* check for TC configuration change */ |
1683 | if (unlikely((status & rx_bump_tc) && |
1684 | (priv->rx_tc != SXGBE_MTL_SFMODE) && |
1685 | (priv->rx_tc < 128))) { |
1686 | /* step of TC is 32 */ |
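		/* e.g. 32 -> 64 -> 96 -> 128; once rx_tc reaches 128 the
		 * (rx_tc < 128) guard above stops further bumps.
		 */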
1687 | priv->rx_tc += 32; |
1688 | priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, |
1689 | rxq->queue_no, priv->rx_tc); |
1690 | priv->xstats.rx_threshold = priv->rx_tc; |
1691 | } |
1692 | |
1693 | return IRQ_HANDLED; |
1694 | } |
1695 | |
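/* Read a 64-bit MMC counter that the hardware splits across a low/high
 * register pair. The two reads are not atomic by themselves; the caller
 * freezes the MMC counters (SXGBE_MMC_CTRL_CNT_FRZ) around them so the
 * halves stay consistent.
 */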
1696 | static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) |
1697 | { |
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;
1701 | |
1702 | return val; |
1703 | } |
1704 | |
1706 | /* sxgbe_get_stats64 - entry point to see statistical information of device |
1707 | * @dev : device pointer. |
1708 | * @stats : pointer to hold all the statistical information of device. |
1709 | * Description: |
1710 | * This function is a driver entry point whenever ifconfig command gets |
1711 | * executed to see device statistics. Statistics are number of |
1712 | * bytes sent or received, errors occurred etc. |
1713 | */ |
1714 | static void sxgbe_get_stats64(struct net_device *dev, |
1715 | struct rtnl_link_stats64 *stats) |
1716 | { |
1717 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1718 | void __iomem *ioaddr = priv->ioaddr; |
1719 | u64 count; |
1720 | |
	spin_lock(&priv->stats_lock);
1722 | /* Freeze the counter registers before reading value otherwise it may |
1723 | * get updated by hardware while we are reading them |
1724 | */ |
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
1726 | |
1727 | stats->rx_bytes = sxgbe_get_stat64(ioaddr, |
1728 | SXGBE_MMC_RXOCTETLO_GCNT_REG, |
1729 | SXGBE_MMC_RXOCTETHI_GCNT_REG); |
1730 | |
1731 | stats->rx_packets = sxgbe_get_stat64(ioaddr, |
1732 | SXGBE_MMC_RXFRAMELO_GBCNT_REG, |
1733 | SXGBE_MMC_RXFRAMEHI_GBCNT_REG); |
1734 | |
1735 | stats->multicast = sxgbe_get_stat64(ioaddr, |
1736 | SXGBE_MMC_RXMULTILO_GCNT_REG, |
1737 | SXGBE_MMC_RXMULTIHI_GCNT_REG); |
1738 | |
1739 | stats->rx_crc_errors = sxgbe_get_stat64(ioaddr, |
1740 | SXGBE_MMC_RXCRCERRLO_REG, |
1741 | SXGBE_MMC_RXCRCERRHI_REG); |
1742 | |
1743 | stats->rx_length_errors = sxgbe_get_stat64(ioaddr, |
1744 | SXGBE_MMC_RXLENERRLO_REG, |
1745 | SXGBE_MMC_RXLENERRHI_REG); |
1746 | |
1747 | stats->rx_missed_errors = sxgbe_get_stat64(ioaddr, |
1748 | SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG, |
1749 | SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG); |
1750 | |
1751 | stats->tx_bytes = sxgbe_get_stat64(ioaddr, |
1752 | SXGBE_MMC_TXOCTETLO_GCNT_REG, |
1753 | SXGBE_MMC_TXOCTETHI_GCNT_REG); |
1754 | |
1755 | count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG, |
1756 | SXGBE_MMC_TXFRAMEHI_GBCNT_REG); |
1757 | |
1758 | stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG, |
1759 | SXGBE_MMC_TXFRAMEHI_GCNT_REG); |
1760 | stats->tx_errors = count - stats->tx_errors; |
1761 | stats->tx_packets = count; |
1762 | stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG, |
1763 | SXGBE_MMC_TXUFLWHI_GBCNT_REG); |
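	/* Unfreeze the counters so the hardware resumes updating them */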
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);
1766 | } |
1767 | |
1768 | /* sxgbe_set_features - entry point to set offload features of the device. |
1769 | * @dev : device pointer. |
1770 | * @features : features which are required to be set. |
1771 | * Description: |
1772 | * This function is a driver entry point and called by Linux kernel whenever |
1773 | * any device features are set or reset by user. |
1774 | * Return value: |
1775 | * This function returns 0 after setting or resetting device features. |
1776 | */ |
1777 | static int sxgbe_set_features(struct net_device *dev, |
1778 | netdev_features_t features) |
1779 | { |
1780 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1781 | netdev_features_t changed = dev->features ^ features; |
1782 | |
1783 | if (changed & NETIF_F_RXCSUM) { |
1784 | if (features & NETIF_F_RXCSUM) { |
1785 | priv->hw->mac->enable_rx_csum(priv->ioaddr); |
1786 | priv->rxcsum_insertion = true; |
1787 | } else { |
1788 | priv->hw->mac->disable_rx_csum(priv->ioaddr); |
1789 | priv->rxcsum_insertion = false; |
1790 | } |
1791 | } |
1792 | |
1793 | return 0; |
1794 | } |
1795 | |
1796 | /* sxgbe_change_mtu - entry point to change MTU size for the device. |
1797 | * @dev : device pointer. |
1798 | * @new_mtu : the new MTU size for the device. |
1799 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer |
1800 | * to drive packet transmission. Ethernet has an MTU of 1500 octets |
1801 | * (ETH_DATA_LEN). This value can be changed with ifconfig. |
1802 | * Return value: |
1803 | * 0 on success and an appropriate (-)ve integer as defined in errno.h |
1804 | * file on failure. |
1805 | */ |
1806 | static int sxgbe_change_mtu(struct net_device *dev, int new_mtu) |
1807 | { |
1808 | dev->mtu = new_mtu; |
1809 | |
1810 | if (!netif_running(dev)) |
1811 | return 0; |
1812 | |
	/* The receive ring buffer size must be set based on the MTU. If the
	 * MTU is changed, the receive ring buffers need to be reinitialised,
	 * so bring the interface down and then back up.
	 */
1817 | sxgbe_release(dev); |
1818 | return sxgbe_open(dev); |
1819 | } |
1820 | |
1821 | static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, |
1822 | unsigned int reg_n) |
1823 | { |
1824 | unsigned long data; |
1825 | |
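	/* The 6-byte MAC address is split across two registers: the top two
	 * bytes go into the HIGH register, the remaining four into the LOW
	 * register. E.g. for 00:11:22:33:44:55 the HIGH register holds
	 * 0x5544 and the LOW register 0x33221100.
	 */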
1826 | data = (addr[5] << 8) | addr[4]; |
	/* For the MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
1834 | } |
1835 | |
1836 | /** |
1837 | * sxgbe_set_rx_mode - entry point for setting different receive mode of |
1838 | * a device. unicast, multicast addressing |
1839 | * @dev : pointer to the device structure |
1840 | * Description: |
1841 | * This function is a driver entry point which gets called by the kernel |
1842 | * whenever different receive mode like unicast, multicast and promiscuous |
1843 | * must be enabled/disabled. |
1844 | * Return value: |
1845 | * void. |
1846 | */ |
1847 | static void sxgbe_set_rx_mode(struct net_device *dev) |
1848 | { |
1849 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1850 | void __iomem *ioaddr = (void __iomem *)priv->ioaddr; |
1851 | unsigned int value = 0; |
1852 | u32 mc_filter[2]; |
1853 | struct netdev_hw_addr *ha; |
1854 | int reg = 1; |
1855 | |
	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
1857 | __func__, netdev_mc_count(dev), netdev_uc_count(dev)); |
1858 | |
1859 | if (dev->flags & IFF_PROMISC) { |
1860 | value = SXGBE_FRAME_FILTER_PR; |
1861 | |
1862 | } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) || |
1863 | (dev->flags & IFF_ALLMULTI)) { |
1864 | value = SXGBE_FRAME_FILTER_PM; /* pass all multi */ |
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
1867 | |
1868 | } else if (!netdev_mc_empty(dev)) { |
1869 | /* Hash filter for multicast */ |
1870 | value = SXGBE_FRAME_FILTER_HMC; |
1871 | |
1872 | memset(mc_filter, 0, sizeof(mc_filter)); |
1873 | netdev_for_each_mc_addr(ha, dev) { |
1874 | /* The upper 6 bits of the calculated CRC are used to |
			 * index the contents of the hash table
1876 | */ |
1877 | int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; |
1878 | |
1879 | /* The most significant bit determines the register to |
1880 | * use (H/L) while the other 5 bits determine the bit |
1881 | * within the register. |
1882 | */ |
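			/* Worked example: bit_nr = 47 gives 47 >> 5 = 1 and
			 * 47 & 31 = 15, i.e. bit 15 of mc_filter[1], which
			 * is written to SXGBE_HASH_HIGH below.
			 */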
1883 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); |
1884 | } |
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
1887 | } |
1888 | |
1889 | /* Handle multiple unicast addresses (perfect filtering) */ |
1890 | if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) |
1891 | /* Switch to promiscuous mode if more than 16 addrs |
1892 | * are required |
1893 | */ |
1894 | value |= SXGBE_FRAME_FILTER_PR; |
1895 | else { |
1896 | netdev_for_each_uc_addr(ha, dev) { |
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
1898 | reg++; |
1899 | } |
1900 | } |
1901 | #ifdef FRAME_FILTER_DEBUG |
1902 | /* Enable Receive all mode (to debug filtering_fail errors) */ |
1903 | value |= SXGBE_FRAME_FILTER_RA; |
1904 | #endif |
	writel(value, ioaddr + SXGBE_FRAME_FILTER);
1906 | |
	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
1908 | readl(ioaddr + SXGBE_FRAME_FILTER), |
1909 | readl(ioaddr + SXGBE_HASH_HIGH), |
1910 | readl(ioaddr + SXGBE_HASH_LOW)); |
1911 | } |
1912 | |
1913 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1914 | /** |
1915 | * sxgbe_poll_controller - entry point for polling receive by device |
1916 | * @dev : pointer to the device structure |
1917 | * Description: |
1918 | * This function is used by NETCONSOLE and other diagnostic tools |
1919 | * to allow network I/O with interrupts disabled. |
1920 | * Return value: |
1921 | * Void. |
1922 | */ |
1923 | static void sxgbe_poll_controller(struct net_device *dev) |
1924 | { |
1925 | struct sxgbe_priv_data *priv = netdev_priv(dev); |
1926 | |
	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
1930 | } |
1931 | #endif |
1932 | |
1933 | /* sxgbe_ioctl - Entry point for the Ioctl |
1934 | * @dev: Device pointer. |
 * @rq: An IOCTL-specific structure that can contain a pointer to
1936 | * a proprietary structure used to pass information to the driver. |
1937 | * @cmd: IOCTL command |
1938 | * Description: |
1939 | * Currently it supports the phy_mii_ioctl(...) and HW time stamping. |
1940 | */ |
1941 | static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1942 | { |
1943 | int ret = -EOPNOTSUPP; |
1944 | |
1945 | if (!netif_running(dev)) |
1946 | return -EINVAL; |
1947 | |
1948 | switch (cmd) { |
1949 | case SIOCGMIIPHY: |
1950 | case SIOCGMIIREG: |
1951 | case SIOCSMIIREG: |
		ret = phy_do_ioctl(dev, rq, cmd);
1953 | break; |
1954 | default: |
1955 | break; |
1956 | } |
1957 | |
1958 | return ret; |
1959 | } |
1960 | |
1961 | static const struct net_device_ops sxgbe_netdev_ops = { |
1962 | .ndo_open = sxgbe_open, |
1963 | .ndo_start_xmit = sxgbe_xmit, |
1964 | .ndo_stop = sxgbe_release, |
1965 | .ndo_get_stats64 = sxgbe_get_stats64, |
1966 | .ndo_change_mtu = sxgbe_change_mtu, |
1967 | .ndo_set_features = sxgbe_set_features, |
1968 | .ndo_set_rx_mode = sxgbe_set_rx_mode, |
1969 | .ndo_tx_timeout = sxgbe_tx_timeout, |
1970 | .ndo_eth_ioctl = sxgbe_ioctl, |
1971 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1972 | .ndo_poll_controller = sxgbe_poll_controller, |
1973 | #endif |
1974 | .ndo_set_mac_address = eth_mac_addr, |
1975 | }; |
1976 | |
1977 | /* Get the hardware ops */ |
1978 | static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr) |
1979 | { |
1980 | ops_ptr->mac = sxgbe_get_core_ops(); |
1981 | ops_ptr->desc = sxgbe_get_desc_ops(); |
1982 | ops_ptr->dma = sxgbe_get_dma_ops(); |
1983 | ops_ptr->mtl = sxgbe_get_mtl_ops(); |
1984 | |
	/* set the MDIO communication Address/Data registers */
1986 | ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG; |
1987 | ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG; |
1988 | |
1989 | /* Assigning the default link settings |
1990 | * no SXGBE defined default values to be set in registers, |
1991 | * so assigning as 0 for port and duplex |
1992 | */ |
1993 | ops_ptr->link.port = 0; |
1994 | ops_ptr->link.duplex = 0; |
1995 | ops_ptr->link.speed = SXGBE_SPEED_10G; |
1996 | } |
1997 | |
1998 | /** |
1999 | * sxgbe_hw_init - Init the GMAC device |
2000 | * @priv: driver private structure |
2001 | * Description: this function checks the HW capability |
2002 | * (if supported) and sets the driver's features. |
2003 | */ |
2004 | static int sxgbe_hw_init(struct sxgbe_priv_data * const priv) |
2005 | { |
2006 | u32 ctrl_ids; |
2007 | |
	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
2010 | return -ENOMEM; |
2011 | |
2012 | /* get the hardware ops */ |
	sxgbe_get_ops(priv->hw);
2014 | |
2015 | /* get the controller id */ |
2016 | ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr); |
2017 | priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16; |
2018 | priv->hw->ctrl_id = (ctrl_ids & 0x000000ff); |
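	/* i.e. bits 23:16 of the version register carry the user ID and
	 * bits 7:0 the controller ID.
	 */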
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");
2031 | |
2032 | return 0; |
2033 | } |
2034 | |
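/* Issue a DMA soft reset and poll the self-clearing reset bit; with ten
 * retries of mdelay(10) this waits up to roughly 100 ms before giving
 * up with -EBUSY.
 */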
2035 | static int sxgbe_sw_reset(void __iomem *addr) |
2036 | { |
2037 | int retry_count = 10; |
2038 | |
	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
2042 | SXGBE_DMA_SOFT_RESET)) |
2043 | break; |
2044 | mdelay(10); |
2045 | } |
2046 | |
2047 | if (retry_count < 0) |
2048 | return -EBUSY; |
2049 | |
2050 | return 0; |
2051 | } |
2052 | |
2053 | /** |
2054 | * sxgbe_drv_probe |
2055 | * @device: device pointer |
2056 | * @plat_dat: platform data pointer |
2057 | * @addr: iobase memory address |
2058 | * Description: this is the main probe function used to |
2059 | * call the alloc_etherdev, allocate the priv structure. |
2060 | */ |
2061 | struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device, |
2062 | struct sxgbe_plat_data *plat_dat, |
2063 | void __iomem *addr) |
2064 | { |
2065 | struct sxgbe_priv_data *priv; |
2066 | struct net_device *ndev; |
2067 | int ret; |
2068 | u8 queue_num; |
2069 | |
	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
2071 | SXGBE_TX_QUEUES, SXGBE_RX_QUEUES); |
2072 | if (!ndev) |
2073 | return NULL; |
2074 | |
2075 | SET_NETDEV_DEV(ndev, device); |
2076 | |
	priv = netdev_priv(ndev);
2078 | priv->device = device; |
2079 | priv->dev = ndev; |
2080 | |
	sxgbe_set_ethtool_ops(ndev);
2082 | priv->plat = plat_dat; |
2083 | priv->ioaddr = addr; |
2084 | |
	ret = sxgbe_sw_reset(priv->ioaddr);
2086 | if (ret) |
2087 | goto error_free_netdev; |
2088 | |
2089 | /* Verify driver arguments */ |
2090 | sxgbe_verify_args(); |
2091 | |
2092 | /* Init MAC and get the capabilities */ |
2093 | ret = sxgbe_hw_init(priv); |
2094 | if (ret) |
2095 | goto error_free_netdev; |
2096 | |
2097 | /* allocate memory resources for Descriptor rings */ |
2098 | ret = txring_mem_alloc(priv); |
2099 | if (ret) |
2100 | goto error_free_hw; |
2101 | |
2102 | ret = rxring_mem_alloc(priv); |
2103 | if (ret) |
2104 | goto error_free_hw; |
2105 | |
2106 | ndev->netdev_ops = &sxgbe_netdev_ops; |
2107 | |
2108 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
2109 | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 | |
2110 | NETIF_F_GRO; |
2111 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
2112 | ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO); |
2113 | |
2114 | /* assign filtering support */ |
2115 | ndev->priv_flags |= IFF_UNICAST_FLT; |
2116 | |
2117 | /* MTU range: 68 - 9000 */ |
2118 | ndev->min_mtu = MIN_MTU; |
2119 | ndev->max_mtu = MAX_MTU; |
2120 | |
	priv->msg_enable = netif_msg_init(debug, default_msg_level);
2122 | |
2123 | /* Enable TCP segmentation offload for all DMA channels */ |
2124 | if (priv->hw_cap.tcpseg_offload) { |
2125 | SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { |
2126 | priv->hw->dma->enable_tso(priv->ioaddr, queue_num); |
2127 | } |
2128 | } |
2129 | |
2130 | /* Enable Rx checksum offload */ |
2131 | if (priv->hw_cap.rx_csum_offload) { |
2132 | priv->hw->mac->enable_rx_csum(priv->ioaddr); |
2133 | priv->rxcsum_insertion = true; |
2134 | } |
2135 | |
2136 | /* Initialise pause frame settings */ |
2137 | priv->rx_pause = 1; |
2138 | priv->tx_pause = 1; |
2139 | |
	/* Rx Watchdog is available, enable it depending on platform data */
2141 | if (!priv->plat->riwt_off) { |
2142 | priv->use_riwt = 1; |
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
2144 | } |
2145 | |
	netif_napi_add(ndev, &priv->napi, sxgbe_poll);
2147 | |
2148 | spin_lock_init(&priv->stats_lock); |
2149 | |
	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
2154 | goto error_napi_del; |
2155 | } |
2156 | |
	/* If a specific clk_csr value is passed from the platform, the
	 * CSR Clock Range selection cannot be changed at run-time and is
	 * fixed. Otherwise the driver will try to set the MDC clock
	 * dynamically according to the actual csr clock input.
	 */
2163 | if (!priv->plat->clk_csr) |
2164 | sxgbe_clk_csr_set(priv); |
2165 | else |
2166 | priv->clk_csr = priv->plat->clk_csr; |
2167 | |
2168 | /* MDIO bus Registration */ |
2169 | ret = sxgbe_mdio_register(ndev); |
2170 | if (ret < 0) { |
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
2172 | __func__, priv->plat->bus_id); |
2173 | goto error_clk_put; |
2174 | } |
2175 | |
	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2179 | goto error_mdio_unregister; |
2180 | } |
2181 | |
2182 | sxgbe_check_ether_addr(priv); |
2183 | |
2184 | return priv; |
2185 | |
2186 | error_mdio_unregister: |
2187 | sxgbe_mdio_unregister(ndev); |
2188 | error_clk_put: |
	clk_put(priv->sxgbe_clk);
error_napi_del:
	netif_napi_del(&priv->napi);
error_free_hw:
	kfree(priv->hw);
error_free_netdev:
	free_netdev(ndev);
2196 | |
2197 | return NULL; |
2198 | } |
2199 | |
2200 | /** |
2201 | * sxgbe_drv_remove |
2202 | * @ndev: net device pointer |
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
2205 | */ |
2206 | void sxgbe_drv_remove(struct net_device *ndev) |
2207 | { |
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
2209 | u8 queue_num; |
2210 | |
	netdev_info(ndev, "%s: removing driver\n", __func__);
2212 | |
2213 | SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { |
2214 | priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); |
2215 | } |
2216 | |
2217 | priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); |
2218 | priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); |
2219 | |
2220 | priv->hw->mac->enable_tx(priv->ioaddr, false); |
2221 | priv->hw->mac->enable_rx(priv->ioaddr, false); |
2222 | |
	unregister_netdev(ndev);

	sxgbe_mdio_unregister(ndev);

	clk_put(priv->sxgbe_clk);

	netif_napi_del(&priv->napi);

	kfree(priv->hw);

	free_netdev(ndev);
2234 | } |
2235 | |
2236 | #ifdef CONFIG_PM |
2237 | int sxgbe_suspend(struct net_device *ndev) |
2238 | { |
2239 | return 0; |
2240 | } |
2241 | |
2242 | int sxgbe_resume(struct net_device *ndev) |
2243 | { |
2244 | return 0; |
2245 | } |
2246 | |
2247 | int sxgbe_freeze(struct net_device *ndev) |
2248 | { |
2249 | return -ENOSYS; |
2250 | } |
2251 | |
2252 | int sxgbe_restore(struct net_device *ndev) |
2253 | { |
2254 | return -ENOSYS; |
2255 | } |
2256 | #endif /* CONFIG_PM */ |
2257 | |
2258 | /* Driver is configured as Platform driver */ |
2259 | static int __init sxgbe_init(void) |
2260 | { |
2261 | int ret; |
2262 | |
2263 | ret = sxgbe_register_platform(); |
2264 | if (ret) |
2265 | goto err; |
2266 | return 0; |
2267 | err: |
2268 | pr_err("driver registration failed\n" ); |
2269 | return ret; |
2270 | } |
2271 | |
2272 | static void __exit sxgbe_exit(void) |
2273 | { |
2274 | sxgbe_unregister_platform(); |
2275 | } |
2276 | |
2277 | module_init(sxgbe_init); |
2278 | module_exit(sxgbe_exit); |
2279 | |
2280 | #ifndef MODULE |
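/* Built-in (non-modular) configuration via the kernel command line;
 * e.g. booting with "sxgbeeth=eee_timer:2000" sets the LPI timer to
 * 2000 ms.
 */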
2281 | static int __init sxgbe_cmdline_opt(char *str) |
2282 | { |
2283 | char *opt; |
2284 | |
2285 | if (!str || !*str) |
2286 | return 1; |
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
2290 | goto err; |
2291 | } |
2292 | } |
2293 | return 1; |
2294 | |
2295 | err: |
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
2297 | return 1; |
2298 | } |
2299 | |
__setup("sxgbeeth=", sxgbe_cmdline_opt);
2301 | #endif /* MODULE */ |
2302 | |
MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
2306 | |
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
2309 | |
MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
2314 | |
MODULE_LICENSE("GPL");
2316 | |