/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

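/* The Tx and Rx rings use free-running producer ('cur') and consumer
 * ('dirty') indices, so the unsigned subtractions below stay correct
 * across counter wraparound; ring lookups are assumed to mask the
 * index with a power-of-two descriptor count.
 */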
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_tcp_all_headers(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

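/* Estimate the worst-case descriptor usage for the skb before any
 * mapping is done: an optional context descriptor (needed when the MSS
 * or VLAN tag changes), one descriptor per XLGMAC_TX_MAX_BUF_SIZE chunk
 * of the linear data, and likewise for each page fragment.
 */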
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

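	/* Round the buffer size up to the next XLGMAC_RX_BUF_ALIGN
	 * boundary; this is the open-coded ALIGN() idiom and assumes
	 * the alignment is a power of two
	 */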
	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals; the status bits are
		 * write-one-to-clear, so writing back the value read
		 * acknowledges every event handled above
		 */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

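/* A single shared device-level IRQ (xlgmac_isr) always covers the DMA,
 * MAC and MTL status; when pdata->per_channel_irq is set, each channel
 * additionally gets its own DMA IRQ serviced by xlgmac_dma_isr.
 */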
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

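	/* Reset the BQL counters on every Tx queue so that byte and
	 * packet accounting starts clean on the next xlgmac_start()
	 */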
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channels_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		/* The skb has been freed, so report it as handled rather
		 * than leaking a negative errno to the stack
		 */
		return NETDEV_TX_OK;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

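	/* Snapshot the hardware MMC counters into pdata->stats before
	 * deriving the rtnl numbers from them
	 */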
	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, saddr->sa_data);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}

static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

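	/* Re-arm every descriptor the hardware has handed back: unmap
	 * the old buffer, map a fresh one and reset the descriptor for
	 * reuse by the DMA engine
	 */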
	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

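/* Build an skb for a received packet: the header buffer is copied into
 * the skb linear area, while any remaining payload is attached as a
 * page fragment straight from the Rx buffer pages.
 */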
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}

static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}

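/* Rx processing may stop mid-packet when the budget runs out or a
 * descriptor chain is still incomplete; the partial skb, length and
 * error state are parked in the descriptor's saved state and restored
 * on the next poll.
 */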
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						      struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

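/* With a single shared interrupt, one napi instance services every
 * channel: the budget is split across the Rx rings and the pass is
 * repeated until the budget is exhausted or a full pass makes no
 * progress.
 */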
static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						  struct xlgmac_pdata,
						  napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}