// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

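/* Seed each 16B descriptor of a free pool with its slot index, the pool's
 * destination ring number and a stash hint, so the ring is ready for the
 * refill helpers below to attach buffers.
 */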
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

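/* As decoded below, BUFDATALEN packs a size class and a byte count: with
 * bit 14 clear the low 14 bits count bytes in a 16KB buffer; with bit 14
 * set, bits 13:12 distinguish the 4KB and 2KB classes; a zero count stands
 * for the full class size.
 */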
static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}

static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}

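/* Post @nbuf page-sized buffers to the jumbo page pool: allocate and DMA-map
 * a page per slot, publish it through the 16B descriptor, and kick the ring
 * command register once for the whole batch.
 */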
static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return 0;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

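/* Post @nbuf skb buffers to the main receive pool: each slot gets a fresh
 * IP-aligned skb of XGENE_ENET_STD_MTU bytes whose DMA address and 2K
 * length code are written into the 16B descriptor before one batched ring
 * command publishes the lot.
 */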
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

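/* Reclaim one transmitted skb: unmap the head and every fragment, release
 * the MSS register reference taken for TSO frames, count LERR status codes
 * above 2 as drops/errors, and free the skb recorded under USERINFO.
 */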
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		cp_ring->tx_dropped++;
		cp_ring->tx_errors++;
	}

	if (likely(skb))
		dev_kfree_skb_any(skb);
	else
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");

	return 0;
}

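/* The hardware exposes only NUM_MSS_REG MSS registers, so TSO streams must
 * share them: reuse a slot whose programmed MSS already matches, otherwise
 * claim one with a zero refcount, and return -EBUSY when every slot is
 * held by a different MSS value.
 */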
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int mss_index = -EBUSY;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
		}
	}

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}

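/* Build the HOPINFO word that accompanies every TX descriptor: header
 * lengths, checksum-offload enables and, for TSO frames, the MSS register
 * index obtained above. Returns -EBUSY when no MSS slot could be had, so
 * the caller can back off and retry.
 */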
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_frag_size(
						&skb_shinfo(skb)->frags[i]);

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

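/* Fill TX descriptors for one skb. Linear data goes in the first 64B
 * descriptor; up to four fragments fit in an expanded descriptor, and
 * anything beyond that (or fragments larger than 16KB, which get split)
 * spills into an external LL buffer list whose DMA address is planted in
 * exp_desc[2]. Returns the number of ring slots consumed.
 */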
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

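/* .ndo_start_xmit: apply backpressure against the completion level (the
 * counters are free-running, hence the wraparound correction), pad runt
 * frames, build the descriptors and finally ring the doorbell with the
 * consumed slot count.
 */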
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == -EBUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_rx_csum(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);

	if (!(ndev->features & NETIF_F_RXCSUM))
		return;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (ip_is_fragment(iph))
		return;

	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}

/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_CRC &&
	    len >= (ETHER_STD_PACKET + 1) &&
	    len <= (ETHER_STD_PACKET + 4) &&
	    skb->protocol == htons(ETH_P_8021Q))
		return true;

	return false;
}

/* Errata 10GE_8 and ENET_11 - allow packet with length <= 64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
		if (ntohs(eth_hdr(skb)->h_proto) < 46)
			return true;
	}

	return false;
}

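/* Receive one frame: unmap the head buffer, trim the CRC, screen the
 * hardware error status against the errata workarounds above, then attach
 * any jumbo fragments from the page pool before handing the skb to GRO.
 * The buffer and page pools are replenished once their budgets run out.
 */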
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));

	/* strip off CRC as HW isn't doing this */
	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv)
		datalen -= 4;

	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, ndev);

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status)) {
		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
			pdata->false_rflr++;
		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
			pdata->vlan_rjbr++;
		} else {
			dev_kfree_skb_any(skb);
			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
			xgene_enet_parse_error(rx_ring, status);
			rx_ring->rx_dropped++;
			goto out;
		}
	}

	if (!nv)
		goto skip_jumbo;

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	xgene_enet_rx_csum(skb);

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

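/* NAPI worker shared by RX and TX-completion rings: walk descriptors until
 * the ring is empty or the budget runs out, dispatch each slot to
 * xgene_enet_rx_frame() or xgene_enet_tx_completion() based on FPQNUM, and
 * return the processed slots to hardware in one decrement command.
 */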
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq_trans_cond_update(txq);
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret) {
		xgene_enet_napi_disable(pdata);
		return ret;
	}

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);

			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);

			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);

				xgene_enet_free_desc_ring(ring->buf_pool);
			}

			page_pool = ring->page_pool;
			if (page_pool) {
				p = page_pool->frag_page;
				if (p)
					devm_kfree(dev, p);

				p = page_pool->frag_dma_addr;
				if (p)
					devm_kfree(dev, p);
			}

			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

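/* Allocate the port's ring complement: per RX queue an RX ring, a buffer
 * pool and (queue counts permitting) a jumbo page pool; per TX queue a TX
 * ring with its expanded-buffer area plus a completion ring (the RX ring
 * doubles as one when cq_cnt is zero). Everything is torn down again on
 * any allocation failure.
 */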
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	int i, ret, size;
	u8 cpu_bufnum;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			break;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static void xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *stats)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
			stats->tx_dropped += ring->tx_dropped;
			stats->tx_errors += ring->tx_errors;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_dropped += ring->rx_dropped;
			stats->rx_errors += ring->rx_errors +
				ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_length_errors += ring->rx_length_errors;
			stats->rx_crc_errors += ring->rx_crc_errors;
			stats->rx_frame_errors += ring->rx_frame_errors;
			stats->rx_fifo_errors += ring->rx_fifo_errors;
		}
	}
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev))
		return 0;

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = device_property_read_u32(dev, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = device_property_read_u32(dev, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	int i, ret, max_irqs;

	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret < 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

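/* Gather everything the port needs from DT or ACPI: map the three MMIO
 * regions named by the RES_* indices, read the MAC address, port id,
 * phy-connection-type, delays, IRQs and clock, then derive the per-block
 * CSR pointers from the base address.
 */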
1682 | static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) |
1683 | { |
1684 | struct platform_device *pdev; |
1685 | struct net_device *ndev; |
1686 | struct device *dev; |
1687 | struct resource *res; |
1688 | void __iomem *base_addr; |
1689 | u32 offset; |
1690 | int ret = 0; |
1691 | |
1692 | pdev = pdata->pdev; |
1693 | dev = &pdev->dev; |
1694 | ndev = pdata->ndev; |
1695 | |
1696 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); |
1697 | if (!res) { |
1698 | dev_err(dev, "Resource enet_csr not defined\n" ); |
1699 | return -ENODEV; |
1700 | } |
1701 | pdata->base_addr = devm_ioremap(dev, offset: res->start, size: resource_size(res)); |
1702 | if (!pdata->base_addr) { |
1703 | dev_err(dev, "Unable to retrieve ENET Port CSR region\n" ); |
1704 | return -ENOMEM; |
1705 | } |
1706 | |
1707 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); |
1708 | if (!res) { |
1709 | dev_err(dev, "Resource ring_csr not defined\n" ); |
1710 | return -ENODEV; |
1711 | } |
1712 | pdata->ring_csr_addr = devm_ioremap(dev, offset: res->start, |
1713 | size: resource_size(res)); |
1714 | if (!pdata->ring_csr_addr) { |
1715 | dev_err(dev, "Unable to retrieve ENET Ring CSR region\n" ); |
1716 | return -ENOMEM; |
1717 | } |
1718 | |
1719 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); |
1720 | if (!res) { |
1721 | dev_err(dev, "Resource ring_cmd not defined\n" ); |
1722 | return -ENODEV; |
1723 | } |
1724 | pdata->ring_cmd_addr = devm_ioremap(dev, offset: res->start, |
1725 | size: resource_size(res)); |
1726 | if (!pdata->ring_cmd_addr) { |
1727 | dev_err(dev, "Unable to retrieve ENET Ring command region\n" ); |
1728 | return -ENOMEM; |
1729 | } |
1730 | |
1731 | if (dev->of_node) |
1732 | xgene_get_port_id_dt(dev, pdata); |
1733 | #ifdef CONFIG_ACPI |
1734 | else |
1735 | xgene_get_port_id_acpi(dev, pdata); |
1736 | #endif |
1737 | |
1738 | if (device_get_ethdev_address(dev, netdev: ndev)) |
1739 | eth_hw_addr_random(dev: ndev); |
1740 | |
1741 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); |
1742 | |
1743 | pdata->phy_mode = device_get_phy_mode(dev); |
1744 | if (pdata->phy_mode < 0) { |
1745 | dev_err(dev, "Unable to get phy-connection-type\n" ); |
1746 | return pdata->phy_mode; |
1747 | } |
1748 | if (!phy_interface_mode_is_rgmii(mode: pdata->phy_mode) && |
1749 | pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && |
1750 | pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { |
1751 | dev_err(dev, "Incorrect phy-connection-type specified\n" ); |
1752 | return -ENODEV; |
1753 | } |
1754 | |
1755 | ret = xgene_get_tx_delay(pdata); |
1756 | if (ret) |
1757 | return ret; |
1758 | |
1759 | ret = xgene_get_rx_delay(pdata); |
1760 | if (ret) |
1761 | return ret; |
1762 | |
1763 | ret = xgene_enet_get_irqs(pdata); |
1764 | if (ret) |
1765 | return ret; |
1766 | |
1767 | xgene_enet_gpiod_get(pdata); |
1768 | |
1769 | pdata->clk = devm_clk_get(dev: &pdev->dev, NULL); |
1770 | if (IS_ERR(ptr: pdata->clk)) { |
1771 | if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { |
1772 | /* Abort if the clock is defined but couldn't be |
1773 | * retrived. Always abort if the clock is missing on |
1774 | * DT system as the driver can't cope with this case. |
1775 | */ |
1776 | if (PTR_ERR(ptr: pdata->clk) != -ENOENT || dev->of_node) |
1777 | return PTR_ERR(ptr: pdata->clk); |
1778 | /* Firmware may have set up the clock already. */ |
1779 | dev_info(dev, "clocks have been setup already\n" ); |
1780 | } |
1781 | } |
1782 | |
1783 | if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) |
1784 | base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); |
1785 | else |
1786 | base_addr = pdata->base_addr; |
1787 | pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; |
1788 | pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET; |
1789 | pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; |
1790 | pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; |
1791 | if (phy_interface_mode_is_rgmii(mode: pdata->phy_mode) || |
1792 | pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { |
1793 | pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; |
1794 | pdata->mcx_stats_addr = |
1795 | pdata->base_addr + BLOCK_ETH_STATS_OFFSET; |
1796 | offset = (pdata->enet_id == XGENE_ENET1) ? |
1797 | BLOCK_ETH_MAC_CSR_OFFSET : |
1798 | X2_BLOCK_ETH_MAC_CSR_OFFSET; |
1799 | pdata->mcx_mac_csr_addr = base_addr + offset; |
1800 | } else { |
1801 | pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; |
1802 | pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET; |
1803 | pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; |
1804 | pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET; |
1805 | } |
1806 | pdata->rx_buff_cnt = NUM_PKT_BUF; |
1807 | |
1808 | return 0; |
1809 | } |
1810 | |
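/* Reset the port, create the descriptor rings, seed the Rx buffer and
 * page pools, and configure either the preclassifier tree (XGMII) or
 * classifier bypass before initializing the MAC.
 */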
1811 | static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) |
1812 | { |
1813 | struct xgene_enet_cle *enet_cle = &pdata->cle; |
1814 | struct xgene_enet_desc_ring *page_pool; |
1815 | struct net_device *ndev = pdata->ndev; |
1816 | struct xgene_enet_desc_ring *buf_pool; |
1817 | u16 dst_ring_num, ring_id; |
1818 | int i, ret; |
1819 | u32 count; |
1820 | |
1821 | ret = pdata->port_ops->reset(pdata); |
1822 | if (ret) |
1823 | return ret; |
1824 | |
1825 | ret = xgene_enet_create_desc_rings(ndev); |
1826 | if (ret) { |
1827 | netdev_err(dev: ndev, format: "Error in ring configuration\n" ); |
1828 | return ret; |
1829 | } |
1830 | |
1831 | /* setup buffer pool */ |
1832 | for (i = 0; i < pdata->rxq_cnt; i++) { |
1833 | buf_pool = pdata->rx_ring[i]->buf_pool; |
1834 | xgene_enet_init_bufpool(buf_pool); |
1835 | page_pool = pdata->rx_ring[i]->page_pool; |
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}
1848 | |
	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1850 | buf_pool = pdata->rx_ring[0]->buf_pool; |
1851 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
1852 | /* Initialize and Enable PreClassifier Tree */ |
1853 | enet_cle->max_nodes = 512; |
1854 | enet_cle->max_dbptrs = 1024; |
1855 | enet_cle->parsers = 3; |
1856 | enet_cle->active_parser = PARSER_ALL; |
1857 | enet_cle->ptree.start_node = 0; |
1858 | enet_cle->ptree.start_dbptr = 0; |
1859 | enet_cle->jump_bytes = 8; |
1860 | ret = pdata->cle_ops->cle_init(pdata); |
1861 | if (ret) { |
1862 | netdev_err(dev: ndev, format: "Preclass Tree init error\n" ); |
1863 | goto err; |
1864 | } |
1865 | |
1866 | } else { |
1867 | dst_ring_num = xgene_enet_dst_ring_num(ring: pdata->rx_ring[0]); |
1868 | buf_pool = pdata->rx_ring[0]->buf_pool; |
1869 | page_pool = pdata->rx_ring[0]->page_pool; |
1870 | ring_id = (page_pool) ? page_pool->id : 0; |
1871 | pdata->port_ops->cle_bypass(pdata, dst_ring_num, |
1872 | buf_pool->id, ring_id); |
1873 | } |
1874 | |
1875 | ndev->max_mtu = XGENE_ENET_MAX_MTU; |
1876 | pdata->phy_speed = SPEED_UNKNOWN; |
1877 | pdata->mac_ops->init(pdata); |
1878 | |
1879 | return ret; |
1880 | |
1881 | err: |
1882 | xgene_enet_delete_desc_rings(pdata); |
1883 | return ret; |
1884 | } |
1885 | |
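/* Select the MAC, port and ring operations plus default queue counts
 * for the PHY interface mode, then assign the buffer and ring numbering
 * for this port.
 */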
1886 | static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) |
1887 | { |
1888 | switch (pdata->phy_mode) { |
1889 | case PHY_INTERFACE_MODE_RGMII: |
1890 | case PHY_INTERFACE_MODE_RGMII_ID: |
1891 | case PHY_INTERFACE_MODE_RGMII_RXID: |
1892 | case PHY_INTERFACE_MODE_RGMII_TXID: |
1893 | pdata->mac_ops = &xgene_gmac_ops; |
1894 | pdata->port_ops = &xgene_gport_ops; |
1895 | pdata->rm = RM3; |
1896 | pdata->rxq_cnt = 1; |
1897 | pdata->txq_cnt = 1; |
1898 | pdata->cq_cnt = 0; |
1899 | break; |
1900 | case PHY_INTERFACE_MODE_SGMII: |
1901 | pdata->mac_ops = &xgene_sgmac_ops; |
1902 | pdata->port_ops = &xgene_sgport_ops; |
1903 | pdata->rm = RM1; |
1904 | pdata->rxq_cnt = 1; |
1905 | pdata->txq_cnt = 1; |
1906 | pdata->cq_cnt = 1; |
1907 | break; |
1908 | default: |
1909 | pdata->mac_ops = &xgene_xgmac_ops; |
1910 | pdata->port_ops = &xgene_xgport_ops; |
1911 | pdata->cle_ops = &xgene_cle3in_ops; |
1912 | pdata->rm = RM0; |
1913 | if (!pdata->rxq_cnt) { |
1914 | pdata->rxq_cnt = XGENE_NUM_RX_RING; |
1915 | pdata->txq_cnt = XGENE_NUM_TX_RING; |
1916 | pdata->cq_cnt = XGENE_NUM_TXC_RING; |
1917 | } |
1918 | break; |
1919 | } |
1920 | |
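	/* Buffer and ring numbering differs between the two ENET blocks
	 * and between ports within a block.
	 */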
1921 | if (pdata->enet_id == XGENE_ENET1) { |
1922 | switch (pdata->port_id) { |
1923 | case 0: |
1924 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
1925 | pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; |
1926 | pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; |
1927 | pdata->bp_bufnum = X2_START_BP_BUFNUM_0; |
1928 | pdata->ring_num = START_RING_NUM_0; |
1929 | } else { |
1930 | pdata->cpu_bufnum = START_CPU_BUFNUM_0; |
1931 | pdata->eth_bufnum = START_ETH_BUFNUM_0; |
1932 | pdata->bp_bufnum = START_BP_BUFNUM_0; |
1933 | pdata->ring_num = START_RING_NUM_0; |
1934 | } |
1935 | break; |
1936 | case 1: |
1937 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
1938 | pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1; |
1939 | pdata->eth_bufnum = XG_START_ETH_BUFNUM_1; |
1940 | pdata->bp_bufnum = XG_START_BP_BUFNUM_1; |
1941 | pdata->ring_num = XG_START_RING_NUM_1; |
1942 | } else { |
1943 | pdata->cpu_bufnum = START_CPU_BUFNUM_1; |
1944 | pdata->eth_bufnum = START_ETH_BUFNUM_1; |
1945 | pdata->bp_bufnum = START_BP_BUFNUM_1; |
1946 | pdata->ring_num = START_RING_NUM_1; |
1947 | } |
1948 | break; |
1949 | default: |
1950 | break; |
1951 | } |
1952 | pdata->ring_ops = &xgene_ring1_ops; |
1953 | } else { |
1954 | switch (pdata->port_id) { |
1955 | case 0: |
1956 | pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; |
1957 | pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; |
1958 | pdata->bp_bufnum = X2_START_BP_BUFNUM_0; |
1959 | pdata->ring_num = X2_START_RING_NUM_0; |
1960 | break; |
1961 | case 1: |
1962 | pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1; |
1963 | pdata->eth_bufnum = X2_START_ETH_BUFNUM_1; |
1964 | pdata->bp_bufnum = X2_START_BP_BUFNUM_1; |
1965 | pdata->ring_num = X2_START_RING_NUM_1; |
1966 | break; |
1967 | default: |
1968 | break; |
1969 | } |
1970 | pdata->rm = RM0; |
1971 | pdata->ring_ops = &xgene_ring2_ops; |
1972 | } |
1973 | } |
1974 | |
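/* Register one NAPI context per Rx ring and one per Tx completion ring. */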
1975 | static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) |
1976 | { |
1977 | struct napi_struct *napi; |
1978 | int i; |
1979 | |
1980 | for (i = 0; i < pdata->rxq_cnt; i++) { |
1981 | napi = &pdata->rx_ring[i]->napi; |
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
1983 | } |
1984 | |
1985 | for (i = 0; i < pdata->cq_cnt; i++) { |
1986 | napi = &pdata->tx_ring[i]->cp_ring->napi; |
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
1988 | } |
1989 | } |
1990 | |
1991 | #ifdef CONFIG_ACPI |
1992 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
1993 | { "APMC0D05" , XGENE_ENET1}, |
1994 | { "APMC0D30" , XGENE_ENET1}, |
1995 | { "APMC0D31" , XGENE_ENET1}, |
1996 | { "APMC0D3F" , XGENE_ENET1}, |
1997 | { "APMC0D26" , XGENE_ENET2}, |
1998 | { "APMC0D25" , XGENE_ENET2}, |
1999 | { } |
2000 | }; |
2001 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
2002 | #endif |
2003 | |
2004 | static const struct of_device_id xgene_enet_of_match[] = { |
2005 | {.compatible = "apm,xgene-enet" , .data = (void *)XGENE_ENET1}, |
2006 | {.compatible = "apm,xgene1-sgenet" , .data = (void *)XGENE_ENET1}, |
2007 | {.compatible = "apm,xgene1-xgenet" , .data = (void *)XGENE_ENET1}, |
2008 | {.compatible = "apm,xgene2-sgenet" , .data = (void *)XGENE_ENET2}, |
2009 | {.compatible = "apm,xgene2-xgenet" , .data = (void *)XGENE_ENET2}, |
2010 | {}, |
2011 | }; |
2012 | |
2013 | MODULE_DEVICE_TABLE(of, xgene_enet_of_match); |
2014 | |
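/* Allocate the net_device, gather platform resources, bring up the
 * hardware and register the interface with the networking core.
 */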
2015 | static int xgene_enet_probe(struct platform_device *pdev) |
2016 | { |
2017 | struct net_device *ndev; |
2018 | struct xgene_enet_pdata *pdata; |
2019 | struct device *dev = &pdev->dev; |
2020 | void (*link_state)(struct work_struct *); |
2021 | int ret; |
2022 | |
	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2024 | XGENE_NUM_TX_RING, XGENE_NUM_RX_RING); |
2025 | if (!ndev) |
2026 | return -ENOMEM; |
2027 | |
	pdata = netdev_priv(ndev);
2029 | |
2030 | pdata->pdev = pdev; |
2031 | pdata->ndev = ndev; |
2032 | SET_NETDEV_DEV(ndev, dev); |
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
2036 | ndev->features |= NETIF_F_IP_CSUM | |
2037 | NETIF_F_GSO | |
2038 | NETIF_F_GRO | |
2039 | NETIF_F_SG; |
2040 | |
	pdata->enet_id = (enum xgene_enet_id)device_get_match_data(&pdev->dev);
2042 | if (!pdata->enet_id) { |
2043 | ret = -ENODEV; |
2044 | goto err; |
2045 | } |
2046 | |
2047 | ret = xgene_enet_get_resources(pdata); |
2048 | if (ret) |
2049 | goto err; |
2050 | |
2051 | xgene_enet_setup_ops(pdata); |
2052 | spin_lock_init(&pdata->mac_lock); |
2053 | |
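	/* Only the XGMII (10GbE) path advertises TSO and Rx checksum
	 * offload.
	 */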
2054 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
2055 | ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM; |
2056 | spin_lock_init(&pdata->mss_lock); |
2057 | } |
2058 | ndev->hw_features = ndev->features; |
2059 | |
2060 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
2061 | if (ret) { |
2062 | netdev_err(dev: ndev, format: "No usable DMA configuration\n" ); |
2063 | goto err; |
2064 | } |
2065 | |
2066 | xgene_enet_check_phy_handle(pdata); |
2067 | |
2068 | ret = xgene_enet_init_hw(pdata); |
2069 | if (ret) |
2070 | goto err2; |
2071 | |
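	/* With no dedicated MDIO driver, RGMII links are managed over
	 * the MDIO bus; SGMII and XGMII links are polled from a delayed
	 * work item instead.
	 */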
2072 | link_state = pdata->mac_ops->link_state; |
2073 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
2074 | INIT_DELAYED_WORK(&pdata->link_work, link_state); |
2075 | } else if (!pdata->mdio_driver) { |
		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2077 | ret = xgene_enet_mdio_config(pdata); |
2078 | else |
2079 | INIT_DELAYED_WORK(&pdata->link_work, link_state); |
2080 | |
2081 | if (ret) |
2082 | goto err1; |
2083 | } |
2084 | |
2085 | spin_lock_init(&pdata->stats_lock); |
2086 | ret = xgene_extd_stats_init(pdata); |
2087 | if (ret) |
2088 | goto err1; |
2089 | |
2090 | xgene_enet_napi_add(pdata); |
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
2094 | goto err1; |
2095 | } |
2096 | |
2097 | return 0; |
2098 | |
2099 | err1: |
2100 | /* |
2101 | * If necessary, free_netdev() will call netif_napi_del() and undo |
2102 | * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). |
2103 | */ |
2104 | |
2105 | xgene_enet_delete_desc_rings(pdata); |
2106 | |
2107 | err2: |
2108 | if (pdata->mdio_driver) |
2109 | xgene_enet_phy_disconnect(pdata); |
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2111 | xgene_enet_mdio_remove(pdata); |
2112 | err: |
	free_netdev(ndev);
2114 | return ret; |
2115 | } |
2116 | |
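/* Tear down in reverse order of probe: close the device, detach the
 * PHY or MDIO bus, unregister the netdev, free the rings and shut the
 * port down.
 */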
2117 | static void xgene_enet_remove(struct platform_device *pdev) |
2118 | { |
2119 | struct xgene_enet_pdata *pdata; |
2120 | struct net_device *ndev; |
2121 | |
2122 | pdata = platform_get_drvdata(pdev); |
2123 | ndev = pdata->ndev; |
2124 | |
2125 | rtnl_lock(); |
	if (netif_running(ndev))
		dev_close(ndev);
2128 | rtnl_unlock(); |
2129 | |
2130 | if (pdata->mdio_driver) |
2131 | xgene_enet_phy_disconnect(pdata); |
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2133 | xgene_enet_mdio_remove(pdata); |
2134 | |
	unregister_netdev(ndev);
2136 | xgene_enet_delete_desc_rings(pdata); |
2137 | pdata->port_ops->shutdown(pdata); |
	free_netdev(ndev);
2139 | } |
2140 | |
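/* Platform shutdown hook: bail out if probe never completed, otherwise
 * reuse the remove path.
 */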
2141 | static void xgene_enet_shutdown(struct platform_device *pdev) |
2142 | { |
2143 | struct xgene_enet_pdata *pdata; |
2144 | |
2145 | pdata = platform_get_drvdata(pdev); |
2146 | if (!pdata) |
2147 | return; |
2148 | |
2149 | if (!pdata->ndev) |
2150 | return; |
2151 | |
2152 | xgene_enet_remove(pdev); |
2153 | } |
2154 | |
2155 | static struct platform_driver xgene_enet_driver = { |
2156 | .driver = { |
2157 | .name = "xgene-enet" , |
2158 | .of_match_table = xgene_enet_of_match, |
2159 | .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), |
2160 | }, |
2161 | .probe = xgene_enet_probe, |
2162 | .remove_new = xgene_enet_remove, |
2163 | .shutdown = xgene_enet_shutdown, |
2164 | }; |
2165 | |
2166 | module_platform_driver(xgene_enet_driver); |
2167 | |
2168 | MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver" ); |
2169 | MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>" ); |
2170 | MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>" ); |
2171 | MODULE_LICENSE("GPL" ); |
2172 | |