1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Renesas Ethernet Switch device driver |
3 | * |
4 | * Copyright (C) 2022 Renesas Electronics Corporation |
5 | */ |
6 | |
7 | #include <linux/clk.h> |
8 | #include <linux/dma-mapping.h> |
9 | #include <linux/err.h> |
10 | #include <linux/etherdevice.h> |
11 | #include <linux/iopoll.h> |
12 | #include <linux/kernel.h> |
13 | #include <linux/module.h> |
14 | #include <linux/net_tstamp.h> |
15 | #include <linux/of.h> |
16 | #include <linux/of_mdio.h> |
17 | #include <linux/of_net.h> |
18 | #include <linux/phy/phy.h> |
19 | #include <linux/platform_device.h> |
20 | #include <linux/pm.h> |
21 | #include <linux/pm_runtime.h> |
22 | #include <linux/rtnetlink.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> |
25 | #include <linux/sys_soc.h> |
26 | |
27 | #include "rswitch.h" |
28 | |
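/* Note: a short descriptive comment; rswitch_reg_wait() polls a switch
 * register until (value & mask) == expected or RSWITCH_TIMEOUT_US elapses,
 * and is used for the "kick and wait for completion" handshakes below.
 */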
29 | static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) |
30 | { |
31 | u32 val; |
32 | |
33 | return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected, |
34 | 1, RSWITCH_TIMEOUT_US); |
35 | } |
36 | |
37 | static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set) |
38 | { |
39 | iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg); |
40 | } |
41 | |
42 | /* Common Agent block (COMA) */ |
43 | static void rswitch_reset(struct rswitch_private *priv) |
44 | { |
45 | iowrite32(RRC_RR, priv->addr + RRC); |
46 | iowrite32(RRC_RR_CLR, priv->addr + RRC); |
47 | } |
48 | |
49 | static void rswitch_clock_enable(struct rswitch_private *priv) |
50 | { |
51 | iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); |
52 | } |
53 | |
54 | static void rswitch_clock_disable(struct rswitch_private *priv) |
55 | { |
56 | iowrite32(RCDC_RCD, priv->addr + RCDC); |
57 | } |
58 | |
59 | static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, |
60 | unsigned int port) |
61 | { |
62 | u32 val = ioread32(coma_addr + RCEC); |
63 | |
64 | if (val & RCEC_RCE) |
65 | return (val & BIT(port)) ? true : false; |
66 | else |
67 | return false; |
68 | } |
69 | |
70 | static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port, |
71 | int enable) |
72 | { |
73 | u32 val; |
74 | |
75 | if (enable) { |
76 | val = ioread32(coma_addr + RCEC); |
77 | iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC); |
78 | } else { |
79 | val = ioread32(coma_addr + RCDC); |
80 | iowrite32(val | BIT(port), coma_addr + RCDC); |
81 | } |
82 | } |
83 | |
84 | static int rswitch_bpool_config(struct rswitch_private *priv) |
85 | { |
86 | u32 val; |
87 | |
88 | val = ioread32(priv->addr + CABPIRM); |
89 | if (val & CABPIRM_BPR) |
90 | return 0; |
91 | |
92 | iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); |
93 | |
	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
95 | } |
96 | |
97 | static void rswitch_coma_init(struct rswitch_private *priv) |
98 | { |
99 | iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); |
100 | } |
101 | |
102 | /* R-Switch-2 block (TOP) */ |
103 | static void rswitch_top_init(struct rswitch_private *priv) |
104 | { |
105 | unsigned int i; |
106 | |
107 | for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++) |
108 | iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); |
109 | } |
110 | |
111 | /* Forwarding engine block (MFWD) */ |
112 | static void rswitch_fwd_init(struct rswitch_private *priv) |
113 | { |
114 | unsigned int i; |
115 | |
116 | /* For ETHA */ |
117 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) { |
118 | iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i)); |
119 | iowrite32(0, priv->addr + FWPBFC(i)); |
120 | } |
121 | |
122 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) { |
123 | iowrite32(priv->rdev[i]->rx_queue->index, |
124 | priv->addr + FWPBFCSDC(GWCA_INDEX, i)); |
125 | iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i)); |
126 | } |
127 | |
128 | /* For GWCA */ |
129 | iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index)); |
130 | iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index)); |
131 | iowrite32(0, priv->addr + FWPBFC(priv->gwca.index)); |
132 | iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index)); |
133 | } |
134 | |
135 | /* Gateway CPU agent block (GWCA) */ |
136 | static int rswitch_gwca_change_mode(struct rswitch_private *priv, |
137 | enum rswitch_gwca_mode mode) |
138 | { |
139 | int ret; |
140 | |
	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
150 | |
151 | return ret; |
152 | } |
153 | |
154 | static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv) |
155 | { |
156 | iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); |
157 | |
	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
159 | } |
160 | |
161 | static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv) |
162 | { |
163 | iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); |
164 | |
	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
166 | } |
167 | |
168 | static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx) |
169 | { |
170 | u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; |
171 | unsigned int i; |
172 | |
173 | for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { |
174 | if (dis[i] & mask[i]) |
175 | return true; |
176 | } |
177 | |
178 | return false; |
179 | } |
180 | |
181 | static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis) |
182 | { |
183 | unsigned int i; |
184 | |
185 | for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { |
186 | dis[i] = ioread32(priv->addr + GWDIS(i)); |
187 | dis[i] &= ioread32(priv->addr + GWDIE(i)); |
188 | } |
189 | } |
190 | |
191 | static void rswitch_enadis_data_irq(struct rswitch_private *priv, |
192 | unsigned int index, bool enable) |
193 | { |
194 | u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32); |
195 | |
196 | iowrite32(BIT(index % 32), priv->addr + offs); |
197 | } |
198 | |
199 | static void rswitch_ack_data_irq(struct rswitch_private *priv, |
200 | unsigned int index) |
201 | { |
202 | u32 offs = GWDIS(index / 32); |
203 | |
204 | iowrite32(BIT(index % 32), priv->addr + offs); |
205 | } |
206 | |
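/* Each queue is tracked with two indexes: "cur" (next entry the driver will
 * touch) and "dirty" (oldest entry still outstanding). The helpers below
 * advance and measure those indexes modulo ring_size.
 */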
207 | static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, |
208 | bool cur, unsigned int num) |
209 | { |
210 | unsigned int index = cur ? gq->cur : gq->dirty; |
211 | |
212 | if (index + num >= gq->ring_size) |
213 | index = (index + num) % gq->ring_size; |
214 | else |
215 | index += num; |
216 | |
217 | return index; |
218 | } |
219 | |
220 | static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq) |
221 | { |
222 | if (gq->cur >= gq->dirty) |
223 | return gq->cur - gq->dirty; |
224 | else |
225 | return gq->ring_size - gq->dirty + gq->cur; |
226 | } |
227 | |
228 | static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq) |
229 | { |
230 | struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; |
231 | |
232 | if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) |
233 | return true; |
234 | |
235 | return false; |
236 | } |
237 | |
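/* Allocate page-fragment RX buffers for ring entries [start_index,
 * start_index + num); entries that already have a buffer are skipped.
 * On failure everything allocated by this call is freed again.
 */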
238 | static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq, |
239 | unsigned int start_index, |
240 | unsigned int num) |
241 | { |
242 | unsigned int i, index; |
243 | |
244 | for (i = 0; i < num; i++) { |
245 | index = (i + start_index) % gq->ring_size; |
246 | if (gq->rx_bufs[index]) |
247 | continue; |
248 | gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); |
249 | if (!gq->rx_bufs[index]) |
250 | goto err; |
251 | } |
252 | |
253 | return 0; |
254 | |
255 | err: |
256 | for (; i-- > 0; ) { |
257 | index = (i + start_index) % gq->ring_size; |
		skb_free_frag(gq->rx_bufs[index]);
259 | gq->rx_bufs[index] = NULL; |
260 | } |
261 | |
262 | return -ENOMEM; |
263 | } |
264 | |
265 | static void rswitch_gwca_queue_free(struct net_device *ndev, |
266 | struct rswitch_gwca_queue *gq) |
267 | { |
268 | unsigned int i; |
269 | |
270 | if (!gq->dir_tx) { |
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			skb_free_frag(gq->rx_bufs[i]);
		kfree(gq->rx_bufs);
		gq->rx_bufs = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
		kfree(gq->skbs);
		gq->skbs = NULL;
		kfree(gq->unmap_addrs);
288 | gq->unmap_addrs = NULL; |
289 | } |
290 | } |
291 | |
292 | static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv) |
293 | { |
294 | struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; |
295 | |
	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
299 | gq->ts_ring = NULL; |
300 | } |
301 | |
302 | static int rswitch_gwca_queue_alloc(struct net_device *ndev, |
303 | struct rswitch_private *priv, |
304 | struct rswitch_gwca_queue *gq, |
305 | bool dir_tx, unsigned int ring_size) |
306 | { |
307 | unsigned int i, bit; |
308 | |
309 | gq->dir_tx = dir_tx; |
310 | gq->ring_size = ring_size; |
311 | gq->ndev = ndev; |
312 | |
313 | if (!dir_tx) { |
		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
		if (!gq->rx_bufs)
			return -ENOMEM;
		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
		if (!gq->skbs)
			return -ENOMEM;
		gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
		if (!gq->unmap_addrs)
			goto out;
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
333 | } |
334 | |
335 | if (!gq->rx_ring && !gq->tx_ring) |
336 | goto out; |
337 | |
338 | i = gq->index / 32; |
339 | bit = BIT(gq->index % 32); |
340 | if (dir_tx) |
341 | priv->gwca.tx_irq_bits[i] |= bit; |
342 | else |
343 | priv->gwca.rx_irq_bits[i] |= bit; |
344 | |
345 | return 0; |
346 | |
347 | out: |
348 | rswitch_gwca_queue_free(ndev, gq); |
349 | |
350 | return -ENOMEM; |
351 | } |
352 | |
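/* Descriptor pointers are split across a 32-bit low field and an 8-bit high
 * field, so the hardware appears to address at most 40 bits of DMA space.
 */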
353 | static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) |
354 | { |
355 | desc->dptrl = cpu_to_le32(lower_32_bits(addr)); |
356 | desc->dptrh = upper_32_bits(addr) & 0xff; |
357 | } |
358 | |
359 | static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc) |
360 | { |
361 | return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; |
362 | } |
363 | |
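/* Initialise a queue's rswitch_ext_desc ring: fill the data descriptors,
 * terminate the ring with a DT_LINKFIX descriptor pointing back to its base,
 * register that base in the GWCA link-fix table, and program GWDCC for the
 * queue.
 */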
364 | static int rswitch_gwca_queue_format(struct net_device *ndev, |
365 | struct rswitch_private *priv, |
366 | struct rswitch_gwca_queue *gq) |
367 | { |
368 | unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; |
369 | struct rswitch_ext_desc *desc; |
370 | struct rswitch_desc *linkfix; |
371 | dma_addr_t dma_addr; |
372 | unsigned int i; |
373 | |
374 | memset(gq->tx_ring, 0, ring_size); |
375 | for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { |
376 | if (!gq->dir_tx) { |
377 | dma_addr = dma_map_single(ndev->dev.parent, |
378 | gq->rx_bufs[i] + RSWITCH_HEADROOM, |
379 | RSWITCH_MAP_BUF_SIZE, |
380 | DMA_FROM_DEVICE); |
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
397 | |
398 | iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, |
399 | priv->addr + GWDCC_OFFS(gq->index)); |
400 | |
401 | return 0; |
402 | |
403 | err: |
404 | if (!gq->dir_tx) { |
405 | for (desc = gq->tx_ring; i-- > 0; desc++) { |
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
407 | dma_unmap_single(ndev->dev.parent, dma_addr, |
408 | RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); |
409 | } |
410 | } |
411 | |
412 | return -ENOMEM; |
413 | } |
414 | |
415 | static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv, |
416 | unsigned int start_index, |
417 | unsigned int num) |
418 | { |
419 | struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; |
420 | struct rswitch_ts_desc *desc; |
421 | unsigned int i, index; |
422 | |
423 | for (i = 0; i < num; i++) { |
424 | index = (i + start_index) % gq->ring_size; |
425 | desc = &gq->ts_ring[index]; |
426 | desc->desc.die_dt = DT_FEMPTY_ND | DIE; |
427 | } |
428 | } |
429 | |
430 | static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev, |
431 | struct rswitch_gwca_queue *gq, |
432 | unsigned int start_index, |
433 | unsigned int num) |
434 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
436 | struct rswitch_ext_ts_desc *desc; |
437 | unsigned int i, index; |
438 | dma_addr_t dma_addr; |
439 | |
440 | for (i = 0; i < num; i++) { |
441 | index = (i + start_index) % gq->ring_size; |
442 | desc = &gq->rx_ring[index]; |
443 | if (!gq->dir_tx) { |
444 | dma_addr = dma_map_single(ndev->dev.parent, |
445 | gq->rx_bufs[index] + RSWITCH_HEADROOM, |
446 | RSWITCH_MAP_BUF_SIZE, |
447 | DMA_FROM_DEVICE); |
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
453 | dma_wmb(); |
454 | desc->desc.die_dt = DT_FEMPTY | DIE; |
455 | desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); |
456 | } else { |
457 | desc->desc.die_dt = DT_EEMPTY | DIE; |
458 | } |
459 | } |
460 | |
461 | return 0; |
462 | |
463 | err: |
464 | if (!gq->dir_tx) { |
465 | for (; i-- > 0; ) { |
466 | index = (i + start_index) % gq->ring_size; |
467 | desc = &gq->rx_ring[index]; |
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
469 | dma_unmap_single(ndev->dev.parent, dma_addr, |
470 | RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); |
471 | } |
472 | } |
473 | |
474 | return -ENOMEM; |
475 | } |
476 | |
477 | static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev, |
478 | struct rswitch_private *priv, |
479 | struct rswitch_gwca_queue *gq) |
480 | { |
481 | unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; |
482 | struct rswitch_ext_ts_desc *desc; |
483 | struct rswitch_desc *linkfix; |
484 | int err; |
485 | |
486 | memset(gq->rx_ring, 0, ring_size); |
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
488 | if (err < 0) |
489 | return err; |
490 | |
491 | desc = &gq->rx_ring[gq->ring_size]; /* Last */ |
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
493 | desc->desc.die_dt = DT_LINKFIX; |
494 | |
495 | linkfix = &priv->gwca.linkfix_table[gq->index]; |
496 | linkfix->die_dt = DT_LINKFIX; |
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
498 | |
499 | iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | |
500 | GWDCC_ETS | GWDCC_EDE, |
501 | priv->addr + GWDCC_OFFS(gq->index)); |
502 | |
503 | return 0; |
504 | } |
505 | |
506 | static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv) |
507 | { |
508 | unsigned int i, num_queues = priv->gwca.num_queues; |
509 | struct rswitch_gwca *gwca = &priv->gwca; |
510 | struct device *dev = &priv->pdev->dev; |
511 | |
512 | gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; |
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
515 | if (!gwca->linkfix_table) |
516 | return -ENOMEM; |
517 | for (i = 0; i < num_queues; i++) |
518 | gwca->linkfix_table[i].die_dt = DT_EOS; |
519 | |
520 | return 0; |
521 | } |
522 | |
523 | static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) |
524 | { |
525 | struct rswitch_gwca *gwca = &priv->gwca; |
526 | |
527 | if (gwca->linkfix_table) |
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
530 | gwca->linkfix_table = NULL; |
531 | } |
532 | |
533 | static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) |
534 | { |
535 | struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; |
536 | struct rswitch_ts_desc *desc; |
537 | |
538 | gq->ring_size = TS_RING_SIZE; |
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);
551 | |
552 | return 0; |
553 | } |
554 | |
555 | static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) |
556 | { |
557 | struct rswitch_gwca_queue *gq; |
558 | unsigned int index; |
559 | |
	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
564 | gq = &priv->gwca.queues[index]; |
565 | memset(gq, 0, sizeof(*gq)); |
566 | gq->index = index; |
567 | |
568 | return gq; |
569 | } |
570 | |
571 | static void rswitch_gwca_put(struct rswitch_private *priv, |
572 | struct rswitch_gwca_queue *gq) |
573 | { |
	clear_bit(gq->index, priv->gwca.used);
575 | } |
576 | |
577 | static int rswitch_txdmac_alloc(struct net_device *ndev) |
578 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
580 | struct rswitch_private *priv = rdev->priv; |
581 | int err; |
582 | |
583 | rdev->tx_queue = rswitch_gwca_get(priv); |
584 | if (!rdev->tx_queue) |
585 | return -EBUSY; |
586 | |
	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
590 | return err; |
591 | } |
592 | |
593 | return 0; |
594 | } |
595 | |
596 | static void rswitch_txdmac_free(struct net_device *ndev) |
597 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
602 | } |
603 | |
604 | static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index) |
605 | { |
606 | struct rswitch_device *rdev = priv->rdev[index]; |
607 | |
	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
609 | } |
610 | |
611 | static int rswitch_rxdmac_alloc(struct net_device *ndev) |
612 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
614 | struct rswitch_private *priv = rdev->priv; |
615 | int err; |
616 | |
617 | rdev->rx_queue = rswitch_gwca_get(priv); |
618 | if (!rdev->rx_queue) |
619 | return -EBUSY; |
620 | |
	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
624 | return err; |
625 | } |
626 | |
627 | return 0; |
628 | } |
629 | |
630 | static void rswitch_rxdmac_free(struct net_device *ndev) |
631 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
636 | } |
637 | |
638 | static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index) |
639 | { |
640 | struct rswitch_device *rdev = priv->rdev[index]; |
641 | struct net_device *ndev = rdev->ndev; |
642 | |
	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
644 | } |
645 | |
646 | static int rswitch_gwca_hw_init(struct rswitch_private *priv) |
647 | { |
648 | unsigned int i; |
649 | int err; |
650 | |
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
655 | if (err < 0) |
656 | return err; |
657 | |
658 | err = rswitch_gwca_mcast_table_reset(priv); |
659 | if (err < 0) |
660 | return err; |
661 | err = rswitch_gwca_axi_ram_reset(priv); |
662 | if (err < 0) |
663 | return err; |
664 | |
665 | iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); |
666 | iowrite32(0, priv->addr + GWTTFC); |
667 | iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); |
668 | iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); |
669 | iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); |
670 | iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); |
671 | iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f), |
672 | priv->addr + GWMDNC); |
673 | iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); |
674 | |
675 | iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); |
676 | |
677 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) { |
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
690 | } |
691 | |
692 | static int rswitch_gwca_hw_deinit(struct rswitch_private *priv) |
693 | { |
694 | int err; |
695 | |
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
704 | } |
705 | |
706 | static int rswitch_gwca_halt(struct rswitch_private *priv) |
707 | { |
708 | int err; |
709 | |
710 | priv->gwca_halt = true; |
711 | err = rswitch_gwca_hw_deinit(priv); |
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);
713 | |
714 | return err; |
715 | } |
716 | |
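/* Consume one RX descriptor: unmap its buffer and either return a complete
 * skb (FSINGLE/FEND), start a multi-descriptor skb (FSTART), or append a
 * fragment to the pending skb (FMID). Out-of-order descriptor types drop
 * the partial frame.
 */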
717 | static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev, |
718 | struct rswitch_gwca_queue *gq, |
719 | struct rswitch_ext_ts_desc *desc) |
720 | { |
	dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
722 | u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; |
723 | u8 die_dt = desc->desc.die_dt & DT_MASK; |
724 | struct sk_buff *skb = NULL; |
725 | |
726 | dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, |
727 | DMA_FROM_DEVICE); |
728 | |
729 | /* The RX descriptor order will be one of the following: |
730 | * - FSINGLE |
731 | * - FSTART -> FEND |
732 | * - FSTART -> FMID -> FEND |
733 | */ |
734 | |
735 | /* Check whether the descriptor is unexpected order */ |
736 | switch (die_dt) { |
737 | case DT_FSTART: |
738 | case DT_FSINGLE: |
739 | if (gq->skb_fstart) { |
			dev_kfree_skb_any(gq->skb_fstart);
741 | gq->skb_fstart = NULL; |
742 | ndev->stats.rx_dropped++; |
743 | } |
744 | break; |
745 | case DT_FMID: |
746 | case DT_FEND: |
747 | if (!gq->skb_fstart) { |
748 | ndev->stats.rx_dropped++; |
749 | return NULL; |
750 | } |
751 | break; |
752 | default: |
753 | break; |
754 | } |
755 | |
756 | /* Handle the descriptor */ |
757 | switch (die_dt) { |
758 | case DT_FSTART: |
759 | case DT_FSINGLE: |
		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
		if (skb) {
			skb_reserve(skb, RSWITCH_HEADROOM);
			skb_put(skb, pkt_len);
764 | gq->pkt_len = pkt_len; |
765 | if (die_dt == DT_FSTART) { |
766 | gq->skb_fstart = skb; |
767 | skb = NULL; |
768 | } |
769 | } |
770 | break; |
771 | case DT_FMID: |
772 | case DT_FEND: |
		skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
				virt_to_page(gq->rx_bufs[gq->cur]),
				offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
				pkt_len, RSWITCH_BUF_SIZE);
777 | if (die_dt == DT_FEND) { |
778 | skb = gq->skb_fstart; |
779 | gq->skb_fstart = NULL; |
780 | } |
781 | gq->pkt_len += pkt_len; |
782 | break; |
783 | default: |
		netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
785 | break; |
786 | } |
787 | |
788 | return skb; |
789 | } |
790 | |
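/* NAPI RX: walk descriptors until an FEMPTY entry or the quota is spent,
 * then refill the consumed entries with fresh buffers. Returns true when
 * the quota was exhausted, i.e. more work may remain.
 */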
791 | static bool rswitch_rx(struct net_device *ndev, int *quota) |
792 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
794 | struct rswitch_gwca_queue *gq = rdev->rx_queue; |
795 | struct rswitch_ext_ts_desc *desc; |
796 | int limit, boguscnt, ret; |
797 | struct sk_buff *skb; |
798 | unsigned int num; |
799 | u32 get_ts; |
800 | |
801 | if (*quota <= 0) |
802 | return true; |
803 | |
804 | boguscnt = min_t(int, gq->ring_size, *quota); |
805 | limit = boguscnt; |
806 | |
807 | desc = &gq->rx_ring[gq->cur]; |
808 | while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { |
809 | dma_rmb(); |
810 | skb = rswitch_rx_handle_desc(ndev, gq, desc); |
811 | if (!skb) |
812 | goto out; |
813 | |
814 | get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; |
815 | if (get_ts) { |
816 | struct skb_shared_hwtstamps *shhwtstamps; |
817 | struct timespec64 ts; |
818 | |
819 | shhwtstamps = skb_hwtstamps(skb); |
820 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
821 | ts.tv_sec = __le32_to_cpu(desc->ts_sec); |
822 | ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); |
823 | shhwtstamps->hwtstamp = timespec64_to_ktime(ts); |
824 | } |
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += gq->pkt_len;

out:
		gq->rx_bufs[gq->cur] = NULL;
		gq->cur = rswitch_next_queue_index(gq, true, 1);
833 | desc = &gq->rx_ring[gq->cur]; |
834 | |
835 | if (--boguscnt <= 0) |
836 | break; |
837 | } |
838 | |
839 | num = rswitch_get_num_cur_queues(gq); |
	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);
847 | |
848 | *quota -= limit - boguscnt; |
849 | |
850 | return boguscnt <= 0; |
851 | |
852 | err: |
	rswitch_gwca_halt(rdev->priv);
854 | |
855 | return 0; |
856 | } |
857 | |
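/* Reclaim completed TX descriptors (the hardware rewrites them as DT_FEMPTY):
 * count the frame in the stats, unmap the payload, free the skb and return
 * the descriptor to DT_EEMPTY for reuse.
 */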
858 | static void rswitch_tx_free(struct net_device *ndev) |
859 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
861 | struct rswitch_gwca_queue *gq = rdev->tx_queue; |
862 | struct rswitch_ext_desc *desc; |
863 | struct sk_buff *skb; |
864 | |
865 | for (; rswitch_get_num_cur_queues(gq) > 0; |
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
867 | desc = &gq->tx_ring[gq->dirty]; |
868 | if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) |
869 | break; |
870 | |
871 | dma_rmb(); |
872 | skb = gq->skbs[gq->dirty]; |
		if (skb) {
			rdev->ndev->stats.tx_packets++;
			rdev->ndev->stats.tx_bytes += skb->len;
			dma_unmap_single(ndev->dev.parent,
					 gq->unmap_addrs[gq->dirty],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
		}
882 | desc->desc.die_dt = DT_EEMPTY; |
883 | } |
884 | } |
885 | |
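/* NAPI poll handler: free completed TX work, receive up to "budget" packets,
 * and re-enable the per-queue data interrupts once all work is done.
 */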
886 | static int rswitch_poll(struct napi_struct *napi, int budget) |
887 | { |
888 | struct net_device *ndev = napi->dev; |
889 | struct rswitch_private *priv; |
890 | struct rswitch_device *rdev; |
891 | unsigned long flags; |
892 | int quota = budget; |
893 | |
	rdev = netdev_priv(ndev);
895 | priv = rdev->priv; |
896 | |
897 | retry: |
898 | rswitch_tx_free(ndev); |
899 | |
	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
914 | } |
915 | |
916 | out: |
917 | return budget - quota; |
918 | |
919 | err: |
	napi_complete(napi);
921 | |
922 | return 0; |
923 | } |
924 | |
925 | static void rswitch_queue_interrupt(struct net_device *ndev) |
926 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
935 | } |
936 | } |
937 | |
938 | static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) |
939 | { |
940 | struct rswitch_gwca_queue *gq; |
941 | unsigned int i, index, bit; |
942 | |
943 | for (i = 0; i < priv->gwca.num_queues; i++) { |
944 | gq = &priv->gwca.queues[i]; |
945 | index = gq->index / 32; |
946 | bit = BIT(gq->index % 32); |
947 | if (!(dis[index] & bit)) |
948 | continue; |
949 | |
		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
952 | } |
953 | |
954 | return IRQ_HANDLED; |
955 | } |
956 | |
957 | static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id) |
958 | { |
959 | struct rswitch_private *priv = dev_id; |
960 | u32 dis[RSWITCH_NUM_IRQ_REGS]; |
961 | irqreturn_t ret = IRQ_NONE; |
962 | |
963 | rswitch_get_data_irq_status(priv, dis); |
964 | |
	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
967 | ret = rswitch_data_irq(priv, dis); |
968 | |
969 | return ret; |
970 | } |
971 | |
972 | static int rswitch_gwca_request_irqs(struct rswitch_private *priv) |
973 | { |
974 | char *resource_name, *irq_name; |
975 | int i, ret, irq; |
976 | |
977 | for (i = 0; i < GWCA_NUM_IRQS; i++) { |
978 | resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i); |
979 | if (!resource_name) |
980 | return -ENOMEM; |
981 | |
982 | irq = platform_get_irq_byname(priv->pdev, resource_name); |
		kfree(resource_name);
984 | if (irq < 0) |
985 | return irq; |
986 | |
		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
994 | if (ret < 0) |
995 | return ret; |
996 | } |
997 | |
998 | return 0; |
999 | } |
1000 | |
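/* TX timestamp queue: match each completed timestamp descriptor against the
 * pending ts_info list by (port, tag), deliver the timestamp through
 * skb_tstamp_tx(), then refill the consumed descriptors.
 */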
1001 | static void rswitch_ts(struct rswitch_private *priv) |
1002 | { |
1003 | struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; |
1004 | struct rswitch_gwca_ts_info *ts_info, *ts_info2; |
1005 | struct skb_shared_hwtstamps shhwtstamps; |
1006 | struct rswitch_ts_desc *desc; |
1007 | struct timespec64 ts; |
1008 | unsigned int num; |
1009 | u32 tag, port; |
1010 | |
1011 | desc = &gq->ts_ring[gq->cur]; |
1012 | while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { |
1013 | dma_rmb(); |
1014 | |
1015 | port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); |
1016 | tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); |
1017 | |
1018 | list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) { |
1019 | if (!(ts_info->port == port && ts_info->tag == tag)) |
1020 | continue; |
1021 | |
1022 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
1023 | ts.tv_sec = __le32_to_cpu(desc->ts_sec); |
1024 | ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); |
1025 | shhwtstamps.hwtstamp = timespec64_to_ktime(ts); |
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
1030 | break; |
1031 | } |
1032 | |
		gq->cur = rswitch_next_queue_index(gq, true, 1);
1034 | desc = &gq->ts_ring[gq->cur]; |
1035 | } |
1036 | |
1037 | num = rswitch_get_num_cur_queues(gq); |
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
1040 | } |
1041 | |
1042 | static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id) |
1043 | { |
1044 | struct rswitch_private *priv = dev_id; |
1045 | |
1046 | if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { |
1047 | iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); |
1048 | rswitch_ts(priv); |
1049 | |
1050 | return IRQ_HANDLED; |
1051 | } |
1052 | |
1053 | return IRQ_NONE; |
1054 | } |
1055 | |
1056 | static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv) |
1057 | { |
1058 | int irq; |
1059 | |
1060 | irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); |
1061 | if (irq < 0) |
1062 | return irq; |
1063 | |
	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
1066 | } |
1067 | |
1068 | /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */ |
1069 | static int rswitch_etha_change_mode(struct rswitch_etha *etha, |
1070 | enum rswitch_etha_mode mode) |
1071 | { |
1072 | int ret; |
1073 | |
	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
1083 | |
1084 | return ret; |
1085 | } |
1086 | |
1087 | static void rswitch_etha_read_mac_address(struct rswitch_etha *etha) |
1088 | { |
1089 | u32 mrmac0 = ioread32(etha->addr + MRMAC0); |
1090 | u32 mrmac1 = ioread32(etha->addr + MRMAC1); |
	u8 *mac = &etha->mac_addr[0];
1092 | |
1093 | mac[0] = (mrmac0 >> 8) & 0xFF; |
1094 | mac[1] = (mrmac0 >> 0) & 0xFF; |
1095 | mac[2] = (mrmac1 >> 24) & 0xFF; |
1096 | mac[3] = (mrmac1 >> 16) & 0xFF; |
1097 | mac[4] = (mrmac1 >> 8) & 0xFF; |
1098 | mac[5] = (mrmac1 >> 0) & 0xFF; |
1099 | } |
1100 | |
1101 | static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac) |
1102 | { |
1103 | iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); |
1104 | iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], |
1105 | etha->addr + MRMAC1); |
1106 | } |
1107 | |
1108 | static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) |
1109 | { |
1110 | iowrite32(MLVC_PLV, etha->addr + MLVC); |
1111 | |
	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
1113 | } |
1114 | |
1115 | static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) |
1116 | { |
1117 | u32 val; |
1118 | |
1119 | rswitch_etha_write_mac_address(etha, mac); |
1120 | |
1121 | switch (etha->speed) { |
1122 | case 100: |
1123 | val = MPIC_LSC_100M; |
1124 | break; |
1125 | case 1000: |
1126 | val = MPIC_LSC_1G; |
1127 | break; |
1128 | case 2500: |
1129 | val = MPIC_LSC_2_5G; |
1130 | break; |
1131 | default: |
1132 | return; |
1133 | } |
1134 | |
1135 | iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC); |
1136 | } |
1137 | |
1138 | static void rswitch_etha_enable_mii(struct rswitch_etha *etha) |
1139 | { |
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
1143 | } |
1144 | |
1145 | static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) |
1146 | { |
1147 | int err; |
1148 | |
	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
1153 | if (err < 0) |
1154 | return err; |
1155 | |
1156 | iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); |
1157 | rswitch_rmac_setting(etha, mac); |
1158 | rswitch_etha_enable_mii(etha); |
1159 | |
1160 | err = rswitch_etha_wait_link_verification(etha); |
1161 | if (err < 0) |
1162 | return err; |
1163 | |
	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
1169 | } |
1170 | |
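/* Perform one Clause 45 MDIO transaction through MPSM: write the register
 * address first, then either trigger a read and return the 16-bit data from
 * the MPSM_PRD field, or trigger a write of "data".
 */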
1171 | static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read, |
1172 | int phyad, int devad, int regad, int data) |
1173 | { |
1174 | int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45; |
1175 | u32 val; |
1176 | int ret; |
1177 | |
1178 | if (devad == 0xffffffff) |
1179 | return -ENODEV; |
1180 | |
	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
1207 | } |
1208 | |
1209 | return ret; |
1210 | } |
1211 | |
1212 | static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, |
1213 | int regad) |
1214 | { |
1215 | struct rswitch_etha *etha = bus->priv; |
1216 | |
	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
1218 | } |
1219 | |
1220 | static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, |
1221 | int regad, u16 val) |
1222 | { |
1223 | struct rswitch_etha *etha = bus->priv; |
1224 | |
	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
1226 | } |
1227 | |
1228 | /* Call of_node_put(port) after done */ |
1229 | static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) |
1230 | { |
1231 | struct device_node *ports, *port; |
1232 | int err = 0; |
1233 | u32 index; |
1234 | |
	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
1237 | if (!ports) |
1238 | return NULL; |
1239 | |
1240 | for_each_child_of_node(ports, port) { |
		err = of_property_read_u32(port, "reg", &index);
1242 | if (err < 0) { |
1243 | port = NULL; |
1244 | goto out; |
1245 | } |
1246 | if (index == rdev->etha->index) { |
			if (!of_device_is_available(port))
1248 | port = NULL; |
1249 | break; |
1250 | } |
1251 | } |
1252 | |
1253 | out: |
	of_node_put(ports);
1255 | |
1256 | return port; |
1257 | } |
1258 | |
1259 | static int rswitch_etha_get_params(struct rswitch_device *rdev) |
1260 | { |
1261 | u32 max_speed; |
1262 | int err; |
1263 | |
1264 | if (!rdev->np_port) |
1265 | return 0; /* ignored */ |
1266 | |
	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
1268 | if (err) |
1269 | return err; |
1270 | |
	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
1272 | if (!err) { |
1273 | rdev->etha->speed = max_speed; |
1274 | return 0; |
1275 | } |
1276 | |
1277 | /* if no "max-speed" property, let's use default speed */ |
1278 | switch (rdev->etha->phy_interface) { |
1279 | case PHY_INTERFACE_MODE_MII: |
1280 | rdev->etha->speed = SPEED_100; |
1281 | break; |
1282 | case PHY_INTERFACE_MODE_SGMII: |
1283 | rdev->etha->speed = SPEED_1000; |
1284 | break; |
1285 | case PHY_INTERFACE_MODE_USXGMII: |
1286 | rdev->etha->speed = SPEED_2500; |
1287 | break; |
1288 | default: |
1289 | return -EINVAL; |
1290 | } |
1291 | |
1292 | return 0; |
1293 | } |
1294 | |
1295 | static int rswitch_mii_register(struct rswitch_device *rdev) |
1296 | { |
1297 | struct device_node *mdio_np; |
1298 | struct mii_bus *mii_bus; |
1299 | int err; |
1300 | |
1301 | mii_bus = mdiobus_alloc(); |
1302 | if (!mii_bus) |
1303 | return -ENOMEM; |
1304 | |
	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
1307 | mii_bus->priv = rdev->etha; |
1308 | mii_bus->read_c45 = rswitch_etha_mii_read_c45; |
1309 | mii_bus->write_c45 = rswitch_etha_mii_write_c45; |
1310 | mii_bus->parent = &rdev->priv->pdev->dev; |
1311 | |
	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
1316 | goto out; |
1317 | } |
1318 | |
1319 | rdev->etha->mii = mii_bus; |
1320 | |
1321 | out: |
	of_node_put(mdio_np);
1323 | |
1324 | return err; |
1325 | } |
1326 | |
1327 | static void rswitch_mii_unregister(struct rswitch_device *rdev) |
1328 | { |
1329 | if (rdev->etha->mii) { |
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
1332 | rdev->etha->mii = NULL; |
1333 | } |
1334 | } |
1335 | |
1336 | static void rswitch_adjust_link(struct net_device *ndev) |
1337 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
1339 | struct phy_device *phydev = ndev->phydev; |
1340 | |
1341 | if (phydev->link != rdev->etha->link) { |
1342 | phy_print_status(phydev); |
1343 | if (phydev->link) |
			phy_power_on(rdev->serdes);
		else if (rdev->serdes->power_count)
			phy_power_off(rdev->serdes);
1347 | |
1348 | rdev->etha->link = phydev->link; |
1349 | |
1350 | if (!rdev->priv->etha_no_runtime_change && |
1351 | phydev->speed != rdev->etha->speed) { |
1352 | rdev->etha->speed = phydev->speed; |
1353 | |
			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
1356 | } |
1357 | } |
1358 | } |
1359 | |
1360 | static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, |
1361 | struct phy_device *phydev) |
1362 | { |
1363 | if (!rdev->priv->etha_no_runtime_change) |
1364 | return; |
1365 | |
1366 | switch (rdev->etha->speed) { |
1367 | case SPEED_2500: |
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
1384 | } |
1385 | |
1386 | static int rswitch_phy_device_init(struct rswitch_device *rdev) |
1387 | { |
1388 | struct phy_device *phydev; |
1389 | struct device_node *phy; |
1390 | int err = -ENOENT; |
1391 | |
1392 | if (!rdev->np_port) |
1393 | return -ENODEV; |
1394 | |
	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
1396 | if (!phy) |
1397 | return -ENODEV; |
1398 | |
1399 | /* Set phydev->host_interfaces before calling of_phy_connect() to |
1400 | * configure the PHY with the information of host_interfaces. |
1401 | */ |
	phydev = of_phy_find_device(phy);
1403 | if (!phydev) |
1404 | goto out; |
1405 | __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); |
1406 | phydev->mac_managed_pm = true; |
1407 | |
	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
1410 | if (!phydev) |
1411 | goto out; |
1412 | |
1413 | phy_set_max_speed(phydev, SPEED_2500); |
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1418 | rswitch_phy_remove_link_mode(rdev, phydev); |
1419 | |
1420 | phy_attached_info(phydev); |
1421 | |
1422 | err = 0; |
1423 | out: |
	of_node_put(phy);
1425 | |
1426 | return err; |
1427 | } |
1428 | |
1429 | static void rswitch_phy_device_deinit(struct rswitch_device *rdev) |
1430 | { |
1431 | if (rdev->ndev->phydev) |
		phy_disconnect(rdev->ndev->phydev);
1433 | } |
1434 | |
1435 | static int rswitch_serdes_set_params(struct rswitch_device *rdev) |
1436 | { |
1437 | int err; |
1438 | |
	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
1445 | } |
1446 | |
1447 | static int rswitch_ether_port_init_one(struct rswitch_device *rdev) |
1448 | { |
1449 | int err; |
1450 | |
1451 | if (!rdev->etha->operated) { |
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1453 | if (err < 0) |
1454 | return err; |
1455 | if (rdev->priv->etha_no_runtime_change) |
1456 | rdev->etha->operated = true; |
1457 | } |
1458 | |
1459 | err = rswitch_mii_register(rdev); |
1460 | if (err < 0) |
1461 | return err; |
1462 | |
1463 | err = rswitch_phy_device_init(rdev); |
1464 | if (err < 0) |
1465 | goto err_phy_device_init; |
1466 | |
	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
1470 | goto err_serdes_phy_get; |
1471 | } |
1472 | |
1473 | err = rswitch_serdes_set_params(rdev); |
1474 | if (err < 0) |
1475 | goto err_serdes_set_params; |
1476 | |
1477 | return 0; |
1478 | |
1479 | err_serdes_set_params: |
1480 | err_serdes_phy_get: |
1481 | rswitch_phy_device_deinit(rdev); |
1482 | |
1483 | err_phy_device_init: |
1484 | rswitch_mii_unregister(rdev); |
1485 | |
1486 | return err; |
1487 | } |
1488 | |
1489 | static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) |
1490 | { |
1491 | rswitch_phy_device_deinit(rdev); |
1492 | rswitch_mii_unregister(rdev); |
1493 | } |
1494 | |
1495 | static int rswitch_ether_port_init_all(struct rswitch_private *priv) |
1496 | { |
1497 | unsigned int i; |
1498 | int err; |
1499 | |
1500 | rswitch_for_each_enabled_port(priv, i) { |
		err = rswitch_ether_port_init_one(priv->rdev[i]);
1502 | if (err) |
1503 | goto err_init_one; |
1504 | } |
1505 | |
1506 | rswitch_for_each_enabled_port(priv, i) { |
		err = phy_init(priv->rdev[i]->serdes);
1508 | if (err) |
1509 | goto err_serdes; |
1510 | } |
1511 | |
1512 | return 0; |
1513 | |
1514 | err_serdes: |
1515 | rswitch_for_each_enabled_port_continue_reverse(priv, i) |
		phy_exit(priv->rdev[i]->serdes);
1517 | i = RSWITCH_NUM_PORTS; |
1518 | |
1519 | err_init_one: |
1520 | rswitch_for_each_enabled_port_continue_reverse(priv, i) |
		rswitch_ether_port_deinit_one(priv->rdev[i]);
1522 | |
1523 | return err; |
1524 | } |
1525 | |
1526 | static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) |
1527 | { |
1528 | unsigned int i; |
1529 | |
1530 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) { |
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
1533 | } |
1534 | } |
1535 | |
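/* The GWCA timestamp interrupt is shared by all ports, so it is enabled when
 * the first port is opened and disabled again once the last port is closed.
 */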
1536 | static int rswitch_open(struct net_device *ndev) |
1537 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
1555 | |
1556 | return 0; |
1557 | }; |
1558 | |
1559 | static int rswitch_stop(struct net_device *ndev) |
1560 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);
1586 | |
1587 | return 0; |
1588 | }; |
1589 | |
1590 | static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev, |
1591 | struct sk_buff *skb, |
1592 | struct rswitch_ext_desc *desc) |
1593 | { |
1594 | desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | |
1595 | INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); |
1596 | if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { |
1597 | struct rswitch_gwca_ts_info *ts_info; |
1598 | |
		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
1600 | if (!ts_info) |
1601 | return false; |
1602 | |
1603 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1604 | rdev->ts_tag++; |
1605 | desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC); |
1606 | |
1607 | ts_info->skb = skb_get(skb); |
1608 | ts_info->port = rdev->port; |
1609 | ts_info->tag = rdev->ts_tag; |
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
1611 | |
1612 | skb_tx_timestamp(skb); |
1613 | } |
1614 | |
1615 | return true; |
1616 | } |
1617 | |
1618 | static bool rswitch_ext_desc_set(struct rswitch_device *rdev, |
1619 | struct sk_buff *skb, |
1620 | struct rswitch_ext_desc *desc, |
1621 | dma_addr_t dma_addr, u16 len, u8 die_dt) |
1622 | { |
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
1624 | desc->desc.info_ds = cpu_to_le16(len); |
1625 | if (!rswitch_ext_desc_set_info1(rdev, skb, desc)) |
1626 | return false; |
1627 | |
1628 | dma_wmb(); |
1629 | |
1630 | desc->desc.die_dt = die_dt; |
1631 | |
1632 | return true; |
1633 | } |
1634 | |
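/* A frame that spans several descriptors is laid out as FSTART, zero or more
 * FMID entries and a final FEND; a frame that fits in one buffer uses
 * FSINGLE. Only the last descriptor of a frame carries the DIE interrupt
 * flag here, and only that one may be shorter than RSWITCH_DESC_BUF_SIZE.
 */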
1635 | static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index) |
1636 | { |
1637 | if (nr_desc == 1) |
1638 | return DT_FSINGLE | DIE; |
1639 | if (index == 0) |
1640 | return DT_FSTART; |
1641 | if (nr_desc - 1 == index) |
1642 | return DT_FEND | DIE; |
1643 | return DT_FMID; |
1644 | } |
1645 | |
1646 | static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len) |
1647 | { |
1648 | switch (die_dt & DT_MASK) { |
1649 | case DT_FSINGLE: |
1650 | case DT_FEND: |
1651 | return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE; |
1652 | case DT_FSTART: |
1653 | case DT_FMID: |
1654 | return RSWITCH_DESC_BUF_SIZE; |
1655 | default: |
1656 | return 0; |
1657 | } |
1658 | } |
1659 | |
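/* Transmit path: the skb is DMA-mapped once as a single region and described
 * by one or more ring entries. Descriptors are written in reverse order so
 * that DT_FSTART only becomes valid after the rest of the chain is in place.
 */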
1660 | static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
1661 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
1663 | struct rswitch_gwca_queue *gq = rdev->tx_queue; |
1664 | dma_addr_t dma_addr, dma_addr_orig; |
1665 | netdev_tx_t ret = NETDEV_TX_OK; |
1666 | struct rswitch_ext_desc *desc; |
1667 | unsigned int i, nr_desc; |
1668 | u8 die_dt; |
1669 | u16 len; |
1670 | |
1671 | nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; |
1672 | if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { |
		netif_stop_subqueue(ndev, 0);
1674 | return NETDEV_TX_BUSY; |
1675 | } |
1676 | |
1677 | if (skb_put_padto(skb, ETH_ZLEN)) |
1678 | return ret; |
1679 | |
1680 | dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); |
	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
1682 | goto err_kfree; |
1683 | |
1684 | gq->skbs[gq->cur] = skb; |
1685 | gq->unmap_addrs[gq->cur] = dma_addr_orig; |
1686 | |
1687 | /* DT_FSTART should be set at last. So, this is reverse order. */ |
1688 | for (i = nr_desc; i-- > 0; ) { |
		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
		len = rswitch_ext_desc_get_len(die_dt, skb->len);
1693 | if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt)) |
1694 | goto err_unmap; |
1695 | } |
1696 | |
1697 | wmb(); /* gq->cur must be incremented after die_dt was set */ |
1698 | |
	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1701 | |
1702 | return ret; |
1703 | |
1704 | err_unmap: |
1705 | dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); |
1706 | |
1707 | err_kfree: |
1708 | dev_kfree_skb_any(skb); |
1709 | |
1710 | return ret; |
1711 | } |
1712 | |
1713 | static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) |
1714 | { |
1715 | return &ndev->stats; |
1716 | } |
1717 | |
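/* SIOCGHWTSTAMP: report the current hardware timestamping configuration. */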
1718 | static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) |
1719 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
1721 | struct rcar_gen4_ptp_private *ptp_priv; |
1722 | struct hwtstamp_config config; |
1723 | |
1724 | ptp_priv = rdev->priv->ptp_priv; |
1725 | |
1726 | config.flags = 0; |
1727 | config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : |
1728 | HWTSTAMP_TX_OFF; |
1729 | switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { |
1730 | case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: |
1731 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; |
1732 | break; |
1733 | case RCAR_GEN4_RXTSTAMP_TYPE_ALL: |
1734 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
1735 | break; |
1736 | default: |
1737 | config.rx_filter = HWTSTAMP_FILTER_NONE; |
1738 | break; |
1739 | } |
1740 | |
	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1742 | } |
1743 | |
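/* SIOCSHWTSTAMP: apply the hardware timestamping configuration requested by
 * user space; unsupported RX filters fall back to timestamping all packets.
 */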
1744 | static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) |
1745 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
1747 | u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; |
1748 | struct hwtstamp_config config; |
1749 | u32 tstamp_tx_ctrl; |
1750 | |
	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1752 | return -EFAULT; |
1753 | |
1754 | if (config.flags) |
1755 | return -EINVAL; |
1756 | |
1757 | switch (config.tx_type) { |
1758 | case HWTSTAMP_TX_OFF: |
1759 | tstamp_tx_ctrl = 0; |
1760 | break; |
1761 | case HWTSTAMP_TX_ON: |
1762 | tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; |
1763 | break; |
1764 | default: |
1765 | return -ERANGE; |
1766 | } |
1767 | |
1768 | switch (config.rx_filter) { |
1769 | case HWTSTAMP_FILTER_NONE: |
1770 | tstamp_rx_ctrl = 0; |
1771 | break; |
1772 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
1773 | tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; |
1774 | break; |
1775 | default: |
1776 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
1777 | tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; |
1778 | break; |
1779 | } |
1780 | |
1781 | rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; |
1782 | rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; |
1783 | |
	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1785 | } |
1786 | |
1787 | static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) |
1788 | { |
	if (!netif_running(ndev))
1790 | return -EINVAL; |
1791 | |
1792 | switch (cmd) { |
1793 | case SIOCGHWTSTAMP: |
1794 | return rswitch_hwstamp_get(ndev, req); |
1795 | case SIOCSHWTSTAMP: |
1796 | return rswitch_hwstamp_set(ndev, req); |
1797 | default: |
		return phy_mii_ioctl(ndev->phydev, req, cmd);
1799 | } |
1800 | } |
1801 | |
1802 | static const struct net_device_ops rswitch_netdev_ops = { |
1803 | .ndo_open = rswitch_open, |
1804 | .ndo_stop = rswitch_stop, |
1805 | .ndo_start_xmit = rswitch_start_xmit, |
1806 | .ndo_get_stats = rswitch_get_stats, |
1807 | .ndo_eth_ioctl = rswitch_eth_ioctl, |
1808 | .ndo_validate_addr = eth_validate_addr, |
1809 | .ndo_set_mac_address = eth_mac_addr, |
1810 | }; |
1811 | |
1812 | static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) |
1813 | { |
	struct rswitch_device *rdev = netdev_priv(ndev);
1815 | |
	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
1817 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
1818 | SOF_TIMESTAMPING_RX_SOFTWARE | |
1819 | SOF_TIMESTAMPING_SOFTWARE | |
1820 | SOF_TIMESTAMPING_TX_HARDWARE | |
1821 | SOF_TIMESTAMPING_RX_HARDWARE | |
1822 | SOF_TIMESTAMPING_RAW_HARDWARE; |
1823 | info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); |
1824 | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); |
1825 | |
1826 | return 0; |
1827 | } |
1828 | |
1829 | static const struct ethtool_ops rswitch_ethtool_ops = { |
1830 | .get_ts_info = rswitch_get_ts_info, |
1831 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
1832 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
1833 | }; |
1834 | |
1835 | static const struct of_device_id renesas_eth_sw_of_table[] = { |
	{ .compatible = "renesas,r8a779f0-ether-switch", },
1837 | { } |
1838 | }; |
1839 | MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table); |
1840 | |
1841 | static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index) |
1842 | { |
1843 | struct rswitch_etha *etha = &priv->etha[index]; |
1844 | |
1845 | memset(etha, 0, sizeof(*etha)); |
1846 | etha->index = index; |
1847 | etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; |
1848 | etha->coma_addr = priv->addr; |
1849 | |
	/* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
1851 | * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply |
1852 | * both the numerator and the denominator by 10. |
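	 * For example, a (hypothetical) 320 MHz clock yields 3200 / 50 - 1 = 63.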
1853 | */ |
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
1855 | } |
1856 | |
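/* Allocate the "tsn%d" netdev and its RX/TX DMA rings for ETHA port @index. */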
1857 | static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index) |
1858 | { |
1859 | struct platform_device *pdev = priv->pdev; |
1860 | struct rswitch_device *rdev; |
1861 | struct net_device *ndev; |
1862 | int err; |
1863 | |
1864 | if (index >= RSWITCH_NUM_PORTS) |
1865 | return -EINVAL; |
1866 | |
	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
1868 | if (!ndev) |
1869 | return -ENOMEM; |
1870 | |
1871 | SET_NETDEV_DEV(ndev, &pdev->dev); |
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
1875 | rdev->ndev = ndev; |
1876 | rdev->priv = priv; |
1877 | priv->rdev[index] = rdev; |
1878 | rdev->port = index; |
1879 | rdev->etha = &priv->etha[index]; |
1880 | rdev->addr = priv->addr; |
1881 | |
1882 | ndev->base_addr = (unsigned long)rdev->addr; |
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
1884 | ndev->netdev_ops = &rswitch_netdev_ops; |
1885 | ndev->ethtool_ops = &rswitch_ethtool_ops; |
1886 | ndev->max_mtu = RSWITCH_MAX_MTU; |
1887 | ndev->min_mtu = ETH_MIN_MTU; |
1888 | |
	netif_napi_add(ndev, &rdev->napi, rswitch_poll);
1890 | |
1891 | rdev->np_port = rswitch_get_port_node(rdev); |
1892 | rdev->disabled = !rdev->np_port; |
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
1900 | } |
1901 | |
1902 | err = rswitch_etha_get_params(rdev); |
1903 | if (err < 0) |
1904 | goto out_get_params; |
1905 | |
1906 | if (rdev->priv->gwca.speed < rdev->etha->speed) |
1907 | rdev->priv->gwca.speed = rdev->etha->speed; |
1908 | |
1909 | err = rswitch_rxdmac_alloc(ndev); |
1910 | if (err < 0) |
1911 | goto out_rxdmac; |
1912 | |
1913 | err = rswitch_txdmac_alloc(ndev); |
1914 | if (err < 0) |
1915 | goto out_txdmac; |
1916 | |
1917 | return 0; |
1918 | |
1919 | out_txdmac: |
1920 | rswitch_rxdmac_free(ndev); |
1921 | |
1922 | out_rxdmac: |
1923 | out_get_params: |
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
1926 | |
1927 | return err; |
1928 | } |
1929 | |
1930 | static void rswitch_device_free(struct rswitch_private *priv, unsigned int index) |
1931 | { |
1932 | struct rswitch_device *rdev = priv->rdev[index]; |
1933 | struct net_device *ndev = rdev->ndev; |
1934 | |
1935 | rswitch_txdmac_free(ndev); |
1936 | rswitch_rxdmac_free(ndev); |
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
1939 | } |
1940 | |
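/* Bring up the whole switch: reset, COMA/TOP/forwarding setup, per-port
 * netdev and queue allocation, gPTP registration, IRQs and netdev registration.
 */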
1941 | static int rswitch_init(struct rswitch_private *priv) |
1942 | { |
1943 | unsigned int i; |
1944 | int err; |
1945 | |
1946 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) |
		rswitch_etha_init(priv, i);
1948 | |
1949 | rswitch_clock_enable(priv); |
1950 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) |
		rswitch_etha_read_mac_address(&priv->etha[i]);
1952 | |
1953 | rswitch_reset(priv); |
1954 | |
1955 | rswitch_clock_enable(priv); |
1956 | rswitch_top_init(priv); |
1957 | err = rswitch_bpool_config(priv); |
1958 | if (err < 0) |
1959 | return err; |
1960 | |
1961 | rswitch_coma_init(priv); |
1962 | |
1963 | err = rswitch_gwca_linkfix_alloc(priv); |
1964 | if (err < 0) |
1965 | return -ENOMEM; |
1966 | |
1967 | err = rswitch_gwca_ts_queue_alloc(priv); |
1968 | if (err < 0) |
1969 | goto err_ts_queue_alloc; |
1970 | |
1971 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) { |
		err = rswitch_device_alloc(priv, i);
1973 | if (err < 0) { |
1974 | for (; i-- > 0; ) |
				rswitch_device_free(priv, i);
1976 | goto err_device_alloc; |
1977 | } |
1978 | } |
1979 | |
1980 | rswitch_fwd_init(priv); |
1981 | |
	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
1984 | if (err < 0) |
1985 | goto err_ptp_register; |
1986 | |
1987 | err = rswitch_gwca_request_irqs(priv); |
1988 | if (err < 0) |
1989 | goto err_gwca_request_irq; |
1990 | |
1991 | err = rswitch_gwca_ts_request_irqs(priv); |
1992 | if (err < 0) |
1993 | goto err_gwca_ts_request_irq; |
1994 | |
1995 | err = rswitch_gwca_hw_init(priv); |
1996 | if (err < 0) |
1997 | goto err_gwca_hw_init; |
1998 | |
1999 | err = rswitch_ether_port_init_all(priv); |
2000 | if (err) |
2001 | goto err_ether_port_init_all; |
2002 | |
2003 | rswitch_for_each_enabled_port(priv, i) { |
		err = register_netdev(priv->rdev[i]->ndev);
2005 | if (err) { |
2006 | rswitch_for_each_enabled_port_continue_reverse(priv, i) |
				unregister_netdev(priv->rdev[i]->ndev);
2008 | goto err_register_netdev; |
2009 | } |
2010 | } |
2011 | |
2012 | rswitch_for_each_enabled_port(priv, i) |
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
2014 | priv->rdev[i]->ndev->dev_addr); |
2015 | |
2016 | return 0; |
2017 | |
2018 | err_register_netdev: |
2019 | rswitch_ether_port_deinit_all(priv); |
2020 | |
2021 | err_ether_port_init_all: |
2022 | rswitch_gwca_hw_deinit(priv); |
2023 | |
2024 | err_gwca_hw_init: |
2025 | err_gwca_ts_request_irq: |
2026 | err_gwca_request_irq: |
	rcar_gen4_ptp_unregister(priv->ptp_priv);
2028 | |
2029 | err_ptp_register: |
2030 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) |
		rswitch_device_free(priv, i);
2032 | |
2033 | err_device_alloc: |
2034 | rswitch_gwca_ts_queue_free(priv); |
2035 | |
2036 | err_ts_queue_alloc: |
2037 | rswitch_gwca_linkfix_free(priv); |
2038 | |
2039 | return err; |
2040 | } |
2041 | |
2042 | static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { |
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
2044 | { /* Sentinel */ } |
2045 | }; |
2046 | |
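/* Probe: map the "secure_base" register region, set the 40-bit DMA mask
 * (with a 32-bit fallback), allocate the GWCA queues and initialise the switch.
 */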
2047 | static int renesas_eth_sw_probe(struct platform_device *pdev) |
2048 | { |
2049 | const struct soc_device_attribute *attr; |
2050 | struct rswitch_private *priv; |
2051 | struct resource *res; |
2052 | int ret; |
2053 | |
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
2055 | if (!res) { |
		dev_err(&pdev->dev, "invalid resource\n");
2057 | return -EINVAL; |
2058 | } |
2059 | |
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2061 | if (!priv) |
2062 | return -ENOMEM; |
2063 | spin_lock_init(&priv->lock); |
2064 | |
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
2068 | |
	attr = soc_device_match(rswitch_soc_no_speed_change);
2070 | if (attr) |
2071 | priv->etha_no_runtime_change = true; |
2072 | |
2073 | priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); |
2074 | if (!priv->ptp_priv) |
2075 | return -ENOMEM; |
2076 | |
	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);
2082 | |
2083 | priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; |
2084 | |
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2088 | if (ret < 0) |
2089 | return ret; |
2090 | } |
2091 | |
2092 | priv->gwca.index = AGENT_INDEX_GWCA; |
2093 | priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, |
2094 | RSWITCH_MAX_NUM_QUEUES); |
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
2097 | if (!priv->gwca.queues) |
2098 | return -ENOMEM; |
2099 | |
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
2102 | |
2103 | ret = rswitch_init(priv); |
2104 | if (ret < 0) { |
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
2107 | return ret; |
2108 | } |
2109 | |
	device_set_wakeup_capable(&pdev->dev, 1);
2111 | |
2112 | return ret; |
2113 | } |
2114 | |
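/* Undo rswitch_init(): shut down the GWCA and gPTP, unregister the netdevs
 * and release all per-port and queue resources, then gate the switch clock.
 */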
2115 | static void rswitch_deinit(struct rswitch_private *priv) |
2116 | { |
2117 | unsigned int i; |
2118 | |
2119 | rswitch_gwca_hw_deinit(priv); |
	rcar_gen4_ptp_unregister(priv->ptp_priv);
2121 | |
2122 | rswitch_for_each_enabled_port(priv, i) { |
2123 | struct rswitch_device *rdev = priv->rdev[i]; |
2124 | |
		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
2128 | } |
2129 | |
2130 | for (i = 0; i < RSWITCH_NUM_PORTS; i++) |
		rswitch_device_free(priv, i);
2132 | |
2133 | rswitch_gwca_ts_queue_free(priv); |
2134 | rswitch_gwca_linkfix_free(priv); |
2135 | |
2136 | rswitch_clock_disable(priv); |
2137 | } |
2138 | |
2139 | static void renesas_eth_sw_remove(struct platform_device *pdev) |
2140 | { |
2141 | struct rswitch_private *priv = platform_get_drvdata(pdev); |
2142 | |
2143 | rswitch_deinit(priv); |
2144 | |
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
2147 | |
2148 | platform_set_drvdata(pdev, NULL); |
2149 | } |
2150 | |
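/* System suspend: detach and stop running interfaces and power down the
 * initialised SerDes PHYs.
 */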
2151 | static int renesas_eth_sw_suspend(struct device *dev) |
2152 | { |
2153 | struct rswitch_private *priv = dev_get_drvdata(dev); |
2154 | struct net_device *ndev; |
2155 | unsigned int i; |
2156 | |
2157 | rswitch_for_each_enabled_port(priv, i) { |
2158 | ndev = priv->rdev[i]->ndev; |
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			rswitch_stop(ndev);
		}
		if (priv->rdev[i]->serdes->init_count)
			phy_exit(priv->rdev[i]->serdes);
2165 | } |
2166 | |
2167 | return 0; |
2168 | } |
2169 | |
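/* System resume: re-initialise the SerDes PHYs and restart the interfaces
 * that were running at suspend time.
 */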
2170 | static int renesas_eth_sw_resume(struct device *dev) |
2171 | { |
2172 | struct rswitch_private *priv = dev_get_drvdata(dev); |
2173 | struct net_device *ndev; |
2174 | unsigned int i; |
2175 | |
2176 | rswitch_for_each_enabled_port(priv, i) { |
		phy_init(priv->rdev[i]->serdes);
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			rswitch_open(ndev);
			netif_device_attach(ndev);
2182 | } |
2183 | } |
2184 | |
2185 | return 0; |
2186 | } |
2187 | |
2188 | static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, |
2189 | renesas_eth_sw_resume); |
2190 | |
2191 | static struct platform_driver renesas_eth_sw_driver_platform = { |
2192 | .probe = renesas_eth_sw_probe, |
2193 | .remove_new = renesas_eth_sw_remove, |
2194 | .driver = { |
2195 | .name = "renesas_eth_sw" , |
2196 | .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), |
2197 | .of_match_table = renesas_eth_sw_of_table, |
2198 | } |
2199 | }; |
2200 | module_platform_driver(renesas_eth_sw_driver_platform); |
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");
2204 | |