// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define EMAC_RRD_SIZE		4
/* The RRD size if timestamping is enabled: */
#define EMAC_TS_RRD_SIZE	6
#define EMAC_TPD_SIZE		4
#define EMAC_RFD_SIZE		2

#define REG_MAC_RX_STATUS_BIN	EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END	EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN	EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END	EMAC_TXMAC_STATC_REG24

#define RXQ0_NUM_RFD_PREF_DEF	8
#define TXQ0_NUM_TPD_PREF_DEF	5

#define EMAC_PREAMBLE_DEF	7

#define DMAR_DLY_CNT_DEF	15
#define DMAW_DLY_CNT_DEF	4

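/* Interrupt sources left unmasked during normal operation: error (ISR_ERROR),
 * FIFO over/underrun (ISR_OVER) and TX completion (ISR_TX_PKT) interrupts.
 * The RX queue 0 packet interrupt (RX_PKT_INT0) is OR'd into this set at
 * probe time.
 */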
#define IMR_NORMAL_MASK	(ISR_ERROR | ISR_OVER | ISR_TX_PKT)

#define ISR_TX_PKT	(\
	TX_PKT_INT	|\
	TX_PKT_INT1	|\
	TX_PKT_INT2	|\
	TX_PKT_INT3)

#define ISR_OVER	(\
	RFD0_UR_INT	|\
	RFD1_UR_INT	|\
	RFD2_UR_INT	|\
	RFD3_UR_INT	|\
	RFD4_UR_INT	|\
	RXF_OF_INT	|\
	TXF_UR_INT)

#define ISR_ERROR	(\
	DMAR_TO_INT	|\
	DMAW_TO_INT	|\
	TXQ_TO_INT)

/* in sync with enum emac_clk_id */
static const char * const emac_clk_name[] = {
	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
	"rx_clk", "sys_clk"
};

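/* Read-modify-write helper: clear the bits in mask and set the bits in val
 * in the 32-bit register at addr.
 */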
void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
{
	u32 data = readl(addr);

	writel(((data & ~mask) | val), addr);
}

/* Reinitialize the MAC: bring it down, reset the internal SGMII, and bring
 * it back up, serialized by the adapter's reset lock.
 */
int emac_reinit_locked(struct emac_adapter *adpt)
{
	int ret;

	mutex_lock(&adpt->reset_lock);

	emac_mac_down(adpt);
	emac_sgmii_reset(adpt);
	ret = emac_mac_up(adpt);

	mutex_unlock(&adpt->reset_lock);

	return ret;
}

/* NAPI receive poll: process up to budget packets and, once the queue is
 * drained, complete NAPI and re-enable the RX interrupt for this queue.
 */
static int emac_napi_rtx(struct napi_struct *napi, int budget)
{
	struct emac_rx_queue *rx_q =
		container_of(napi, struct emac_rx_queue, napi);
	struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
	struct emac_irq *irq = rx_q->irq;
	int work_done = 0;

	emac_mac_rx_process(adpt, rx_q, &work_done, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		irq->mask |= rx_q->intr;
		writel(irq->mask, adpt->base + EMAC_INT_MASK);
	}

	return work_done;
}

/* Transmit the packet */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}

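/* Core interrupt handler.  All interrupts are masked while the handler runs:
 * error conditions schedule the reset worker, RX traffic is handed off to
 * NAPI (with the RX bit left masked until the poll completes), and TX
 * completions are processed in place.  The (possibly updated) mask is
 * restored on exit.
 */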
static irqreturn_t emac_isr(int _irq, void *data)
{
	struct emac_irq *irq = data;
	struct emac_adapter *adpt =
		container_of(irq, struct emac_adapter, irq);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	u32 isr, status;

	/* disable the interrupt */
	writel(0, adpt->base + EMAC_INT_MASK);

	isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);

	status = isr & irq->mask;
	if (status == 0)
		goto exit;

	if (status & ISR_ERROR) {
		net_err_ratelimited("%s: error interrupt 0x%lx\n",
				    adpt->netdev->name, status & ISR_ERROR);
		/* reset MAC */
		schedule_work(&adpt->work_thread);
	}

	/* Schedule the napi for receive queue with interrupt
	 * status bit set
	 */
	if (status & rx_q->intr) {
		if (napi_schedule_prep(&rx_q->napi)) {
			irq->mask &= ~rx_q->intr;
			__napi_schedule(&rx_q->napi);
		}
	}

	if (status & TX_PKT_INT)
		emac_mac_tx_process(adpt, &adpt->tx_q);

	if (status & ISR_OVER)
		net_warn_ratelimited("%s: TX/RX overflow interrupt\n",
				     adpt->netdev->name);

exit:
	/* enable the interrupt */
	writel(irq->mask, adpt->base + EMAC_INT_MASK);

	return IRQ_HANDLED;
}

/* Configure VLAN tag strip/insert feature */
static int emac_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct emac_adapter *adpt = netdev_priv(netdev);

	/* We only need to reprogram the hardware if the VLAN tag features
	 * have changed, and if it's already running.
	 */
	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	if (!netif_running(netdev))
		return 0;

	/* emac_mac_mode_config() uses netdev->features to configure the EMAC,
	 * so make sure it's set first.
	 */
	netdev->features = features;

	return emac_reinit_locked(adpt);
}

/* Configure Multicast and Promiscuous modes */
static void emac_rx_mode_set(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	emac_mac_mode_config(adpt);

	/* update multicast address filtering */
	emac_mac_multicast_addr_clear(adpt);
	netdev_for_each_mc_addr(ha, netdev)
		emac_mac_multicast_addr_set(adpt, ha->addr);
}

/* Change the Maximum Transfer Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	netif_dbg(adpt, hw, adpt->netdev,
		  "changing MTU from %d to %d\n", netdev->mtu,
		  new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		return emac_reinit_locked(adpt);

	return 0;
}

/* Called when the network interface is made active */
static int emac_open(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_irq *irq = &adpt->irq;
	int ret;

	ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
	if (ret) {
		netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
		return ret;
	}

	/* allocate rx/tx dma buffer & descriptors */
	ret = emac_mac_rx_tx_rings_alloc_all(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = emac_sgmii_open(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = emac_mac_up(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		emac_sgmii_close(adpt);
		return ret;
	}

	return 0;
}

/* Called when the network interface is disabled */
static int emac_close(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	mutex_lock(&adpt->reset_lock);

	emac_sgmii_close(adpt);
	emac_mac_down(adpt);
	emac_mac_rx_tx_rings_free_all(adpt);

	free_irq(adpt->irq.irq, &adpt->irq);

	mutex_unlock(&adpt->reset_lock);

	return 0;
}

/* Respond to a TX hang */
static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	schedule_work(&adpt->work_thread);
}

/**
 * emac_update_hw_stats - read the EMAC stat registers
 * @adpt: pointer to adapter struct
 *
 * Reads the stats registers and writes the values to adpt->stats.
 *
 * adpt->stats.lock must be held while calling this function,
 * and while reading from adpt->stats.
 */
void emac_update_hw_stats(struct emac_adapter *adpt)
{
	struct emac_stats *stats = &adpt->stats;
	u64 *stats_itr = &adpt->stats.rx_ok;
	void __iomem *base = adpt->base;
	unsigned int addr;

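	/* The u64 counters in struct emac_stats are laid out in the same
	 * order as the RX (and then TX) status registers, so each register
	 * bank can be accumulated by walking a counter pointer alongside
	 * the register address.
	 */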
	addr = REG_MAC_RX_STATUS_BIN;
	while (addr <= REG_MAC_RX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional rx status */
	stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
	stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);

	/* update tx status */
	addr = REG_MAC_TX_STATUS_BIN;
	stats_itr = &stats->tx_ok;

	while (addr <= REG_MAC_TX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional tx status */
	stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
}

/* Provide network statistics info for the interface */
static void emac_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *net_stats)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_stats *stats = &adpt->stats;

	spin_lock(&stats->lock);

	emac_update_hw_stats(adpt);

	/* return parsed statistics */
	net_stats->rx_packets = stats->rx_ok;
	net_stats->tx_packets = stats->tx_ok;
	net_stats->rx_bytes = stats->rx_byte_cnt;
	net_stats->tx_bytes = stats->tx_byte_cnt;
	net_stats->multicast = stats->rx_mcast;
	net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
				stats->tx_late_col + stats->tx_abort_col;

	net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
			       stats->rx_len_err + stats->rx_sz_ov +
			       stats->rx_align_err;
	net_stats->rx_fifo_errors = stats->rx_rxf_ov;
	net_stats->rx_length_errors = stats->rx_len_err;
	net_stats->rx_crc_errors = stats->rx_fcs_err;
	net_stats->rx_frame_errors = stats->rx_align_err;
	net_stats->rx_over_errors = stats->rx_rxf_ov;
	net_stats->rx_missed_errors = stats->rx_rxf_ov;

	net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
			       stats->tx_underrun + stats->tx_trunc;
	net_stats->tx_fifo_errors = stats->tx_underrun;
	net_stats->tx_aborted_errors = stats->tx_abort_col;
	net_stats->tx_window_errors = stats->tx_late_col;

	spin_unlock(&stats->lock);
}

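/* netdev callbacks; MAC address validation/assignment and the PHY ioctl are
 * handled by the generic eth/phylib helpers.
 */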
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_open,
	.ndo_stop = emac_close,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_start_xmit = emac_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = emac_change_mtu,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_tx_timeout = emac_tx_timeout,
	.ndo_get_stats64 = emac_get_stats64,
	.ndo_set_features = emac_set_features,
	.ndo_set_rx_mode = emac_rx_mode_set,
};

/* Watchdog task routine, called to reinitialize the EMAC */
static void emac_work_thread(struct work_struct *work)
{
	struct emac_adapter *adpt =
		container_of(work, struct emac_adapter, work_thread);

	emac_reinit_locked(adpt);
}

/* Initialize various data structures */
static void emac_init_adapter(struct emac_adapter *adpt)
{
	u32 reg;

	adpt->rrd_size = EMAC_RRD_SIZE;
	adpt->tpd_size = EMAC_TPD_SIZE;
	adpt->rfd_size = EMAC_RFD_SIZE;

	/* descriptors */
	adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
	adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;

	/* dma */
	adpt->dma_order = emac_dma_ord_out;
	adpt->dmar_block = emac_dma_req_4096;
	adpt->dmaw_block = emac_dma_req_128;
	adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
	adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
	adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
	adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;

	/* irq moderator */
	reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
	      ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
	adpt->irq_mod = reg;

	/* others */
	adpt->preamble = EMAC_PREAMBLE_DEF;

	/* default to automatic flow control */
	adpt->automatic = true;

	/* Disable single-pause-frame mode by default */
	adpt->single_pause_mode = false;
}

/* Get the clocks */
static int emac_clks_get(struct platform_device *pdev,
			 struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++) {
		struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);

		if (IS_ERR(clk)) {
			dev_err(&pdev->dev,
				"could not claim clock %s (error=%li)\n",
				emac_clk_name[i], PTR_ERR(clk));

			return PTR_ERR(clk);
		}

		adpt->clk[i] = clk;
	}

	return 0;
}

/* Initialize clocks */
static int emac_clks_phase1_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	/* On ACPI platforms, clocks are controlled by firmware and/or
	 * ACPI, not by drivers.
	 */
	if (has_acpi_companion(&pdev->dev))
		return 0;

	ret = emac_clks_get(pdev, adpt);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
	if (ret)
		goto disable_clk_axi;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
	if (ret)
		goto disable_clk_cfg_ahb;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
	if (ret)
		goto disable_clk_cfg_ahb;

	return 0;

disable_clk_cfg_ahb:
	clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
disable_clk_axi:
	clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);

	return ret;
}

/* Enable clocks; needs emac_clks_phase1_init to be called before */
static int emac_clks_phase2_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	if (has_acpi_companion(&pdev->dev))
		return 0;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
}

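/* Disable and unprepare every clock claimed in emac_clks_get().  On ACPI
 * systems no clocks were claimed, so the clk[] entries are NULL and
 * clk_disable_unprepare() is a no-op for them.
 */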
static void emac_clks_teardown(struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++)
		clk_disable_unprepare(adpt->clk[i]);
}

/* Get the resources */
static int emac_probe_resources(struct platform_device *pdev,
				struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	int ret = 0;

	/* get mac address */
	if (device_get_ethdev_address(&pdev->dev, netdev))
		eth_hw_addr_random(netdev);

	/* Core 0 interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	adpt->irq.irq = ret;

	/* base register address */
	adpt->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adpt->base))
		return PTR_ERR(adpt->base);

	/* CSR register address */
	adpt->csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(adpt->csr))
		return PTR_ERR(adpt->csr);

	netdev->base_addr = (unsigned long)adpt->base;

	return 0;
}

static const struct of_device_id emac_dt_match[] = {
	{
		.compatible = "qcom,fsm9900-emac",
	},
	{}
};
MODULE_DEVICE_TABLE(of, emac_dt_match);

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
	{
		.id = "QCOM8070",
	},
	{}
};
MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
#endif

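/* Probe: set the DMA mask, allocate the net_device, map the register
 * regions, bring up the bus clocks, configure the external PHY and the
 * internal SGMII, switch the clocks to their operating rates, and finally
 * register the net_device.
 */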
static int emac_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct emac_adapter *adpt;
	struct emac_sgmii *phy;
	u16 devid, revid;
	u32 reg;
	int ret;

	/* The TPD buffer address is limited to:
	 * 1. PTP: 45bits. (Driver doesn't support yet.)
	 * 2. NON-PTP: 46bits.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
	if (ret) {
		dev_err(&pdev->dev, "could not set DMA mask\n");
		return ret;
	}

	netdev = alloc_etherdev(sizeof(struct emac_adapter));
	if (!netdev)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	emac_set_ethtool_ops(netdev);

	adpt = netdev_priv(netdev);
	adpt->netdev = netdev;
	adpt->msg_enable = EMAC_MSG_DEFAULT;

	phy = &adpt->phy;
	atomic_set(&phy->decode_error_count, 0);

	mutex_init(&adpt->reset_lock);
	spin_lock_init(&adpt->stats.lock);

	adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;

	ret = emac_probe_resources(pdev, adpt);
	if (ret)
		goto err_undo_netdev;

	/* initialize clocks */
	ret = emac_clks_phase1_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_netdev;
	}

	netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
	netdev->irq = adpt->irq.irq;

	netdev->netdev_ops = &emac_netdev_ops;

	emac_init_adapter(adpt);

	/* init external phy */
	ret = emac_phy_config(pdev, adpt);
	if (ret)
		goto err_undo_clocks;

	/* init internal sgmii phy */
	ret = emac_sgmii_config(pdev, adpt);
	if (ret)
		goto err_undo_mdiobus;

	/* enable clocks */
	ret = emac_clks_phase2_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_mdiobus;
	}

	/* set hw features */
	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = netdev->features;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
				 NETIF_F_TSO | NETIF_F_TSO6;

	/* MTU range: 46 - 9194 */
	netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	INIT_WORK(&adpt->work_thread, emac_work_thread);

	/* Initialize queues */
	emac_mac_rx_tx_ring_init_all(pdev, adpt);

	netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(&pdev->dev, "could not register net device\n");
		goto err_undo_napi;
	}

	reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
	devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
	revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
	reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);

	netif_info(adpt, probe, netdev,
		   "hardware id %d.%d, hardware version %d.%d.%d\n",
		   devid, revid,
		   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
		   (reg & MINOR_BMSK) >> MINOR_SHFT,
		   (reg & STEP_BMSK) >> STEP_SHFT);

	return 0;

err_undo_napi:
	netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
	emac_clks_teardown(adpt);
err_undo_netdev:
	free_netdev(netdev);

	return ret;
}

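/* Undo everything emac_probe() set up, in roughly reverse order */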
static void emac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adpt->rx_q.napi);

	free_irq(adpt->irq.irq, &adpt->irq);
	cancel_work_sync(&adpt->work_thread);

	emac_clks_teardown(adpt);

	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);

	if (adpt->phy.digital)
		iounmap(adpt->phy.digital);
	iounmap(adpt->phy.base);

	free_netdev(netdev);
}

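/* Quiesce the hardware on shutdown: if the interface is up, close the SGMII
 * and reset the MAC so that no DMA or interrupts remain active.
 */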
static void emac_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	if (netdev->flags & IFF_UP) {
		/* Closing the SGMII turns off its interrupts */
		emac_sgmii_close(adpt);

		/* Resetting the MAC turns off all DMA and its interrupts */
		emac_mac_reset(adpt);
	}
}

static struct platform_driver emac_platform_driver = {
	.probe = emac_probe,
	.remove_new = emac_remove,
	.driver = {
		.name = "qcom-emac",
		.of_match_table = emac_dt_match,
		.acpi_match_table = ACPI_PTR(emac_acpi_match),
	},
	.shutdown = emac_shutdown,
};

module_platform_driver(emac_platform_driver);

MODULE_DESCRIPTION("Qualcomm EMAC Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");