1 | /* |
2 | * AMD 10Gb Ethernet driver |
3 | * |
4 | * This file is available to you under your choice of the following two |
5 | * licenses: |
6 | * |
7 | * License 1: GPLv2 |
8 | * |
9 | * Copyright (c) 2014 Advanced Micro Devices, Inc. |
10 | * |
11 | * This file is free software; you may copy, redistribute and/or modify |
12 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation, either version 2 of the License, or (at |
14 | * your option) any later version. |
15 | * |
16 | * This file is distributed in the hope that it will be useful, but |
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 | * General Public License for more details. |
20 | * |
21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
23 | * |
24 | * This file incorporates work covered by the following copyright and |
25 | * permission notice: |
26 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation |
27 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, |
28 | * Inc. unless otherwise expressly agreed to in writing between Synopsys |
29 | * and you. |
30 | * |
31 | * The Software IS NOT an item of Licensed Software or Licensed Product |
32 | * under any End User Software License Agreement or Agreement for Licensed |
33 | * Product with Synopsys or any supplement thereto. Permission is hereby |
34 | * granted, free of charge, to any person obtaining a copy of this software |
35 | * annotated with this license and the Software, to deal in the Software |
36 | * without restriction, including without limitation the rights to use, |
37 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
38 | * of the Software, and to permit persons to whom the Software is furnished |
39 | * to do so, subject to the following conditions: |
40 | * |
41 | * The above copyright notice and this permission notice shall be included |
42 | * in all copies or substantial portions of the Software. |
43 | * |
44 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" |
45 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
46 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
47 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS |
48 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
49 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
50 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
51 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
52 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
53 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
54 | * THE POSSIBILITY OF SUCH DAMAGE. |
55 | * |
56 | * |
57 | * License 2: Modified BSD |
58 | * |
59 | * Copyright (c) 2014 Advanced Micro Devices, Inc. |
60 | * All rights reserved. |
61 | * |
62 | * Redistribution and use in source and binary forms, with or without |
63 | * modification, are permitted provided that the following conditions are met: |
64 | * * Redistributions of source code must retain the above copyright |
65 | * notice, this list of conditions and the following disclaimer. |
66 | * * Redistributions in binary form must reproduce the above copyright |
67 | * notice, this list of conditions and the following disclaimer in the |
68 | * documentation and/or other materials provided with the distribution. |
69 | * * Neither the name of Advanced Micro Devices, Inc. nor the |
70 | * names of its contributors may be used to endorse or promote products |
71 | * derived from this software without specific prior written permission. |
72 | * |
73 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
74 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
75 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
76 | * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY |
77 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
78 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
79 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
80 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
81 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
82 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
83 | * |
84 | * This file incorporates work covered by the following copyright and |
85 | * permission notice: |
86 | * The Synopsys DWC ETHER XGMAC Software Driver and documentation |
87 | * (hereinafter "Software") is an unsupported proprietary work of Synopsys, |
88 | * Inc. unless otherwise expressly agreed to in writing between Synopsys |
89 | * and you. |
90 | * |
91 | * The Software IS NOT an item of Licensed Software or Licensed Product |
92 | * under any End User Software License Agreement or Agreement for Licensed |
93 | * Product with Synopsys or any supplement thereto. Permission is hereby |
94 | * granted, free of charge, to any person obtaining a copy of this software |
95 | * annotated with this license and the Software, to deal in the Software |
96 | * without restriction, including without limitation the rights to use, |
97 | * copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
98 | * of the Software, and to permit persons to whom the Software is furnished |
99 | * to do so, subject to the following conditions: |
100 | * |
101 | * The above copyright notice and this permission notice shall be included |
102 | * in all copies or substantial portions of the Software. |
103 | * |
104 | * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" |
105 | * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
106 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A |
107 | * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS |
108 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
109 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
110 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
111 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
112 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
113 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
114 | * THE POSSIBILITY OF SUCH DAMAGE. |
115 | */ |
116 | |
117 | #include "xgbe.h" |
118 | #include "xgbe-common.h" |
119 | |
120 | static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *); |
121 | |
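/* Release all resources held by a single ring: unmap and free the
 * per-descriptor data entries, drop any outstanding Rx page
 * allocations, and free the coherent DMA memory that holds the
 * hardware descriptors.
 */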
122 | static void xgbe_free_ring(struct xgbe_prv_data *pdata, |
123 | struct xgbe_ring *ring) |
124 | { |
125 | struct xgbe_ring_data *rdata; |
126 | unsigned int i; |
127 | |
128 | if (!ring) |
129 | return; |
130 | |
131 | if (ring->rdata) { |
132 | for (i = 0; i < ring->rdesc_count; i++) { |
133 | rdata = XGBE_GET_DESC_DATA(ring, i); |
134 | xgbe_unmap_rdata(pdata, rdata); |
135 | } |
136 | |
		kfree(ring->rdata);
138 | ring->rdata = NULL; |
139 | } |
140 | |
141 | if (ring->rx_hdr_pa.pages) { |
142 | dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, |
143 | ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); |
		put_page(ring->rx_hdr_pa.pages);
145 | |
146 | ring->rx_hdr_pa.pages = NULL; |
147 | ring->rx_hdr_pa.pages_len = 0; |
148 | ring->rx_hdr_pa.pages_offset = 0; |
149 | ring->rx_hdr_pa.pages_dma = 0; |
150 | } |
151 | |
152 | if (ring->rx_buf_pa.pages) { |
153 | dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, |
154 | ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); |
		put_page(ring->rx_buf_pa.pages);
156 | |
157 | ring->rx_buf_pa.pages = NULL; |
158 | ring->rx_buf_pa.pages_len = 0; |
159 | ring->rx_buf_pa.pages_offset = 0; |
160 | ring->rx_buf_pa.pages_dma = 0; |
161 | } |
162 | |
163 | if (ring->rdesc) { |
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
168 | ring->rdesc = NULL; |
169 | } |
170 | } |
171 | |
172 | static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata) |
173 | { |
174 | struct xgbe_channel *channel; |
175 | unsigned int i; |
176 | |
	DBGPR("-->xgbe_free_ring_resources\n");
178 | |
179 | for (i = 0; i < pdata->channel_count; i++) { |
180 | channel = pdata->channel[i]; |
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
183 | } |
184 | |
	DBGPR("<--xgbe_free_ring_resources\n");
186 | } |
187 | |
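/* Allocate zeroed memory, preferring the requested NUMA node but
 * falling back to an allocation from any node if that fails.
 */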
188 | static void *xgbe_alloc_node(size_t size, int node) |
189 | { |
190 | void *mem; |
191 | |
192 | mem = kzalloc_node(size, GFP_KERNEL, node); |
193 | if (!mem) |
194 | mem = kzalloc(size, GFP_KERNEL); |
195 | |
196 | return mem; |
197 | } |
198 | |
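/* Allocate coherent DMA memory on a specific NUMA node by temporarily
 * overriding the device's node and restoring it afterwards. If the
 * node-local attempt fails, retry without the node preference.
 */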
199 | static void *xgbe_dma_alloc_node(struct device *dev, size_t size, |
200 | dma_addr_t *dma, int node) |
201 | { |
202 | void *mem; |
203 | int cur_node = dev_to_node(dev); |
204 | |
205 | set_dev_node(dev, node); |
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
211 | |
212 | return mem; |
213 | } |
214 | |
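/* Allocate the hardware descriptor array (coherent DMA memory) and the
 * matching software descriptor-data array for one ring, both preferring
 * the ring's NUMA node.
 */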
215 | static int xgbe_init_ring(struct xgbe_prv_data *pdata, |
216 | struct xgbe_ring *ring, unsigned int rdesc_count) |
217 | { |
218 | size_t size; |
219 | |
220 | if (!ring) |
221 | return 0; |
222 | |
223 | /* Descriptors */ |
224 | size = rdesc_count * sizeof(struct xgbe_ring_desc); |
225 | |
226 | ring->rdesc_count = rdesc_count; |
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
229 | if (!ring->rdesc) |
230 | return -ENOMEM; |
231 | |
232 | /* Descriptor information */ |
233 | size = rdesc_count * sizeof(struct xgbe_ring_data); |
234 | |
	ring->rdata = xgbe_alloc_node(size, ring->node);
236 | if (!ring->rdata) |
237 | return -ENOMEM; |
238 | |
	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
242 | |
243 | return 0; |
244 | } |
245 | |
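/* Allocate descriptor memory for the Tx and Rx rings of every channel,
 * unwinding all rings on the first failure.
 */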
246 | static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) |
247 | { |
248 | struct xgbe_channel *channel; |
249 | unsigned int i; |
250 | int ret; |
251 | |
252 | for (i = 0; i < pdata->channel_count; i++) { |
253 | channel = pdata->channel[i]; |
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
255 | channel->name); |
256 | |
		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
259 | if (ret) { |
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
262 | goto err_ring; |
263 | } |
264 | |
		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
266 | channel->name); |
267 | |
		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
273 | goto err_ring; |
274 | } |
275 | } |
276 | |
277 | return 0; |
278 | |
279 | err_ring: |
280 | xgbe_free_ring_resources(pdata); |
281 | |
282 | return ret; |
283 | } |
284 | |
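/* Allocate and DMA-map a group of pages for Rx buffers. The requested
 * order is reduced until an allocation succeeds, and if no node-local
 * pages can be obtained the whole attempt is retried from any node.
 */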
285 | static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, |
286 | struct xgbe_page_alloc *pa, int alloc_order, |
287 | int node) |
288 | { |
289 | struct page *pages = NULL; |
290 | dma_addr_t pages_dma; |
291 | gfp_t gfp; |
292 | int order; |
293 | |
294 | again: |
295 | order = alloc_order; |
296 | |
297 | /* Try to obtain pages, decreasing order if necessary */ |
298 | gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; |
299 | while (order >= 0) { |
		pages = alloc_pages_node(node, gfp, order);
301 | if (pages) |
302 | break; |
303 | |
304 | order--; |
305 | } |
306 | |
307 | /* If we couldn't get local pages, try getting from anywhere */ |
308 | if (!pages && (node != NUMA_NO_NODE)) { |
309 | node = NUMA_NO_NODE; |
310 | goto again; |
311 | } |
312 | |
313 | if (!pages) |
314 | return -ENOMEM; |
315 | |
316 | /* Map the pages */ |
317 | pages_dma = dma_map_page(pdata->dev, pages, 0, |
318 | PAGE_SIZE << order, DMA_FROM_DEVICE); |
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
321 | return -ENOMEM; |
322 | } |
323 | |
324 | pa->pages = pages; |
325 | pa->pages_len = PAGE_SIZE << order; |
326 | pa->pages_offset = 0; |
327 | pa->pages_dma = pages_dma; |
328 | |
329 | return 0; |
330 | } |
331 | |
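/* Carve a buffer of 'len' bytes out of the current page allocation,
 * taking a page reference for the descriptor. When the allocation
 * cannot satisfy another buffer of the same size, this descriptor
 * also records the mapping so it can unmap the page(s) later, and the
 * allocation is reset so fresh pages are obtained next time.
 */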
332 | static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, |
333 | struct xgbe_page_alloc *pa, |
334 | unsigned int len) |
335 | { |
	get_page(pa->pages);
337 | bd->pa = *pa; |
338 | |
339 | bd->dma_base = pa->pages_dma; |
340 | bd->dma_off = pa->pages_offset; |
341 | bd->dma_len = len; |
342 | |
343 | pa->pages_offset += len; |
344 | if ((pa->pages_offset + len) > pa->pages_len) { |
345 | /* This data descriptor is responsible for unmapping page(s) */ |
346 | bd->pa_unmap = *pa; |
347 | |
348 | /* Get a new allocation next time */ |
349 | pa->pages = NULL; |
350 | pa->pages_len = 0; |
351 | pa->pages_offset = 0; |
352 | pa->pages_dma = 0; |
353 | } |
354 | } |
355 | |
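/* Assign header and data buffers to an Rx descriptor, replenishing the
 * ring's page allocations as needed. Header buffers come from an
 * order-0 allocation; data buffers from a larger, higher-order one.
 */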
356 | static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, |
357 | struct xgbe_ring *ring, |
358 | struct xgbe_ring_data *rdata) |
359 | { |
360 | int ret; |
361 | |
362 | if (!ring->rx_hdr_pa.pages) { |
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
364 | if (ret) |
365 | return ret; |
366 | } |
367 | |
368 | if (!ring->rx_buf_pa.pages) { |
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
371 | if (ret) |
372 | return ret; |
373 | } |
374 | |
375 | /* Set up the header page info */ |
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);
378 | |
379 | /* Set up the buffer page info */ |
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);
382 | |
383 | return 0; |
384 | } |
385 | |
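/* Point each Tx descriptor-data entry at its hardware descriptor and
 * DMA address, reset the ring state, then let the hardware layer
 * program the Tx descriptor registers.
 */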
386 | static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) |
387 | { |
388 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
389 | struct xgbe_channel *channel; |
390 | struct xgbe_ring *ring; |
391 | struct xgbe_ring_data *rdata; |
392 | struct xgbe_ring_desc *rdesc; |
393 | dma_addr_t rdesc_dma; |
394 | unsigned int i, j; |
395 | |
	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
397 | |
398 | for (i = 0; i < pdata->channel_count; i++) { |
399 | channel = pdata->channel[i]; |
400 | ring = channel->tx_ring; |
401 | if (!ring) |
402 | break; |
403 | |
404 | rdesc = ring->rdesc; |
405 | rdesc_dma = ring->rdesc_dma; |
406 | |
407 | for (j = 0; j < ring->rdesc_count; j++) { |
408 | rdata = XGBE_GET_DESC_DATA(ring, j); |
409 | |
410 | rdata->rdesc = rdesc; |
411 | rdata->rdesc_dma = rdesc_dma; |
412 | |
413 | rdesc++; |
414 | rdesc_dma += sizeof(struct xgbe_ring_desc); |
415 | } |
416 | |
417 | ring->cur = 0; |
418 | ring->dirty = 0; |
419 | memset(&ring->tx, 0, sizeof(ring->tx)); |
420 | |
421 | hw_if->tx_desc_init(channel); |
422 | } |
423 | |
	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
425 | } |
426 | |
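/* Point each Rx descriptor-data entry at its hardware descriptor and
 * DMA address, attach Rx buffers to every entry, reset the ring state,
 * then let the hardware layer program the Rx descriptor registers.
 */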
427 | static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) |
428 | { |
429 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
430 | struct xgbe_channel *channel; |
431 | struct xgbe_ring *ring; |
432 | struct xgbe_ring_desc *rdesc; |
433 | struct xgbe_ring_data *rdata; |
434 | dma_addr_t rdesc_dma; |
435 | unsigned int i, j; |
436 | |
	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
438 | |
439 | for (i = 0; i < pdata->channel_count; i++) { |
440 | channel = pdata->channel[i]; |
441 | ring = channel->rx_ring; |
442 | if (!ring) |
443 | break; |
444 | |
445 | rdesc = ring->rdesc; |
446 | rdesc_dma = ring->rdesc_dma; |
447 | |
448 | for (j = 0; j < ring->rdesc_count; j++) { |
449 | rdata = XGBE_GET_DESC_DATA(ring, j); |
450 | |
451 | rdata->rdesc = rdesc; |
452 | rdata->rdesc_dma = rdesc_dma; |
453 | |
454 | if (xgbe_map_rx_buffer(pdata, ring, rdata)) |
455 | break; |
456 | |
457 | rdesc++; |
458 | rdesc_dma += sizeof(struct xgbe_ring_desc); |
459 | } |
460 | |
461 | ring->cur = 0; |
462 | ring->dirty = 0; |
463 | |
464 | hw_if->rx_desc_init(channel); |
465 | } |
466 | |
	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
468 | } |
469 | |
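/* Undo all mappings and references held by a descriptor-data entry:
 * Tx skb DMA mappings, the skb itself, Rx page references, and any
 * page mappings this entry was made responsible for unmapping.
 */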
470 | static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata, |
471 | struct xgbe_ring_data *rdata) |
472 | { |
473 | if (rdata->skb_dma) { |
474 | if (rdata->mapped_as_page) { |
475 | dma_unmap_page(pdata->dev, rdata->skb_dma, |
476 | rdata->skb_dma_len, DMA_TO_DEVICE); |
477 | } else { |
478 | dma_unmap_single(pdata->dev, rdata->skb_dma, |
479 | rdata->skb_dma_len, DMA_TO_DEVICE); |
480 | } |
481 | rdata->skb_dma = 0; |
482 | rdata->skb_dma_len = 0; |
483 | } |
484 | |
485 | if (rdata->skb) { |
		dev_kfree_skb_any(rdata->skb);
487 | rdata->skb = NULL; |
488 | } |
489 | |
490 | if (rdata->rx.hdr.pa.pages) |
		put_page(rdata->rx.hdr.pa.pages);
492 | |
493 | if (rdata->rx.hdr.pa_unmap.pages) { |
494 | dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma, |
495 | rdata->rx.hdr.pa_unmap.pages_len, |
496 | DMA_FROM_DEVICE); |
		put_page(rdata->rx.hdr.pa_unmap.pages);
498 | } |
499 | |
500 | if (rdata->rx.buf.pa.pages) |
		put_page(rdata->rx.buf.pa.pages);
502 | |
503 | if (rdata->rx.buf.pa_unmap.pages) { |
504 | dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma, |
505 | rdata->rx.buf.pa_unmap.pages_len, |
506 | DMA_FROM_DEVICE); |
		put_page(rdata->rx.buf.pa_unmap.pages);
508 | } |
509 | |
510 | memset(&rdata->tx, 0, sizeof(rdata->tx)); |
511 | memset(&rdata->rx, 0, sizeof(rdata->rx)); |
512 | |
513 | rdata->mapped_as_page = 0; |
514 | |
515 | if (rdata->state_saved) { |
516 | rdata->state_saved = 0; |
517 | rdata->state.skb = NULL; |
518 | rdata->state.len = 0; |
519 | rdata->state.error = 0; |
520 | } |
521 | } |
522 | |
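/* Map an skb for transmission, spreading it across as many descriptors
 * as needed: an optional slot reserved for a context descriptor (when
 * a new TSO MSS or VLAN tag must be programmed), the TSO header, the
 * linear data, and each page fragment, with every piece capped at
 * XGBE_TX_MAX_BUF_SIZE. Returns the number of descriptors used, or 0
 * on failure after unwinding all mappings made so far.
 */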
523 | static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) |
524 | { |
525 | struct xgbe_prv_data *pdata = channel->pdata; |
526 | struct xgbe_ring *ring = channel->tx_ring; |
527 | struct xgbe_ring_data *rdata; |
528 | struct xgbe_packet_data *packet; |
529 | skb_frag_t *frag; |
530 | dma_addr_t skb_dma; |
531 | unsigned int start_index, cur_index; |
532 | unsigned int offset, tso, vlan, datalen, len; |
533 | unsigned int i; |
534 | |
	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
536 | |
537 | offset = 0; |
538 | start_index = ring->cur; |
539 | cur_index = ring->cur; |
540 | |
541 | packet = &ring->packet_data; |
542 | packet->rdesc_count = 0; |
543 | packet->length = 0; |
544 | |
545 | tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
546 | TSO_ENABLE); |
547 | vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, |
548 | VLAN_CTAG); |
549 | |
550 | /* Save space for a context descriptor if needed */ |
551 | if ((tso && (packet->mss != ring->tx.cur_mss)) || |
552 | (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))) |
553 | cur_index++; |
554 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
555 | |
556 | if (tso) { |
557 | /* Map the TSO header */ |
558 | skb_dma = dma_map_single(pdata->dev, skb->data, |
559 | packet->header_len, DMA_TO_DEVICE); |
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
562 | goto err_out; |
563 | } |
564 | rdata->skb_dma = skb_dma; |
565 | rdata->skb_dma_len = packet->header_len; |
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);
569 | |
570 | offset = packet->header_len; |
571 | |
572 | packet->length += packet->header_len; |
573 | |
574 | cur_index++; |
575 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
576 | } |
577 | |
578 | /* Map the (remainder of the) packet */ |
579 | for (datalen = skb_headlen(skb) - offset; datalen; ) { |
580 | len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE); |
581 | |
582 | skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, |
583 | DMA_TO_DEVICE); |
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
586 | goto err_out; |
587 | } |
588 | rdata->skb_dma = skb_dma; |
589 | rdata->skb_dma_len = len; |
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);
593 | |
594 | datalen -= len; |
595 | offset += len; |
596 | |
597 | packet->length += len; |
598 | |
599 | cur_index++; |
600 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
601 | } |
602 | |
603 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);
606 | |
607 | frag = &skb_shinfo(skb)->frags[i]; |
608 | offset = 0; |
609 | |
610 | for (datalen = skb_frag_size(frag); datalen; ) { |
611 | len = min_t(unsigned int, datalen, |
612 | XGBE_TX_MAX_BUF_SIZE); |
613 | |
			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
619 | goto err_out; |
620 | } |
621 | rdata->skb_dma = skb_dma; |
622 | rdata->skb_dma_len = len; |
623 | rdata->mapped_as_page = 1; |
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);
627 | |
628 | datalen -= len; |
629 | offset += len; |
630 | |
631 | packet->length += len; |
632 | |
633 | cur_index++; |
634 | rdata = XGBE_GET_DESC_DATA(ring, cur_index); |
635 | } |
636 | } |
637 | |
638 | /* Save the skb address in the last entry. We always have some data |
639 | * that has been mapped so rdata is always advanced past the last |
640 | * piece of mapped data - use the entry pointed to by cur_index - 1. |
641 | */ |
642 | rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); |
643 | rdata->skb = skb; |
644 | |
645 | /* Save the number of descriptor entries used */ |
646 | packet->rdesc_count = cur_index - start_index; |
647 | |
	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
649 | |
650 | return packet->rdesc_count; |
651 | |
652 | err_out: |
653 | while (start_index < cur_index) { |
654 | rdata = XGBE_GET_DESC_DATA(ring, start_index++); |
655 | xgbe_unmap_rdata(pdata, rdata); |
656 | } |
657 | |
	DBGPR("<--xgbe_map_tx_skb: count=0\n");
659 | |
660 | return 0; |
661 | } |
662 | |
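/* Populate the descriptor service interface with the routines defined
 * above.
 */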
663 | void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) |
664 | { |
	DBGPR("-->xgbe_init_function_ptrs_desc\n");
666 | |
667 | desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; |
668 | desc_if->free_ring_resources = xgbe_free_ring_resources; |
669 | desc_if->map_tx_skb = xgbe_map_tx_skb; |
670 | desc_if->map_rx_buffer = xgbe_map_rx_buffer; |
671 | desc_if->unmap_rdata = xgbe_unmap_rdata; |
672 | desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; |
673 | desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; |
674 | |
	DBGPR("<--xgbe_init_function_ptrs_desc\n");
676 | } |
677 | |