// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191

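/* Worked example (illustrative): a single buffer descriptor (BD) can carry
 * at most BD_MAX_SEND_SIZE (8191) bytes, so any buffer larger than that is
 * split across several BDs. A 20000-byte buffer, for instance, needs
 * DIV_ROUND_UP(20000, 8191) = 3 BDs carrying 8191, 8191 and 3618 bytes;
 * see fill_tso_desc() below for the split loop.
 */
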
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - skb_tcp_all_headers(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - skb_tcp_all_headers(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* IPv6 has no L3 checksum, only check the L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	skb_frag_t *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the packet by copying it to a linear skb */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
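
/* Worked example for the BD count above (illustrative): an skb with a
 * 10000-byte linear head and one 9000-byte fragment needs
 * DIV_ROUND_UP(10000, 8191) + DIV_ROUND_UP(9000, 8191) = 2 + 2 = 4
 * descriptors. Only if that count exceeds max_desc_num_per_pkt is the
 * skb flattened into a single linear buffer via skb_copy().
 */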

static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than the hardware limit, split it */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}
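
/* Worked example for the split loop above: size = 20000 gives
 * frag_buf_num = 3 and sizeoflast = 20000 % 8191 = 3618, so the three BDs
 * send 8191, 8191 and 3618 bytes at DMA offsets 0, 8191 and 16382. Only
 * the first BD keeps the full desc_cb length and the SKB type; only the
 * last one may carry the frag_end flag.
 */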

netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	skb_frag_t *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* the whole packet has been translated into descriptors */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	u32 truesize;
	int size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		   hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid reusing pages from a remote NUMA node; default is no reuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are the only owner of the page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to the other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
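
/* Illustrative note on the two-buffer scheme above: with 4K pages and a
 * 2048-byte ring buffer, each page holds exactly two buffers, so
 * "page_offset ^= truesize" toggles the offset between 0 and 2048 and the
 * two halves are handed to the hardware alternately. With larger pages the
 * offset instead advances by the cache-line-aligned packet size until it
 * passes last_offset, after which the page can no longer be reused.
 */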

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP (over IPv4 or IPv6),
	 * 3) UDP (over IPv4 or IPv6),
	 * 4) SCTP (over IPv4 or IPv6)
	 * but we support many L3 (IPv4, IPv6, MPLS, PPPoE etc) and L4 (TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error encountered
	 * during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to bad
	 * checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* on any exception, free the skb and skip past the bad descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before the submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* pass the received skb up to the protocol stack via GRO */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
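
/* Worked example for hns_desc_unused(): with desc_num = 1024,
 * next_to_clean = 10 and next_to_use = 1000 the count wraps, giving
 * 1024 + 10 - 1000 = 34 unused descriptors; with next_to_clean = 1000 and
 * next_to_use = 10 it is simply 1000 - 10 = 990.
 */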

#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */

#define HNS_COAL_BDNUM 3

static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;

	if (coal_enable &&
	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
		return HNS_COAL_BDNUM;
	else
		return 0;
}

static void hns_update_rx_rate(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;
	u32 time_passed_ms;
	u64 total_bytes;

	if (!coal_enable ||
	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
		return;

	/* ring->stats.rx_bytes overflowed */
	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
		ring->coal_last_jiffies = jiffies;
		return;
	}

	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
	do_div(total_bytes, time_passed_ms);
	ring->coal_rx_rate = total_bytes >> 10;

	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
	ring->coal_last_jiffies = jiffies;
}
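
/* Units note (illustrative): total_bytes / time_passed_ms yields bytes per
 * millisecond, and ">> 10" divides by 1024, so coal_rx_rate is roughly
 * MB/s. E.g. 81920 bytes over 10 ms -> 8192 B/ms >> 10 = 8 MB/s, which is
 * then compared against HNS_LOWEST_LATENCY_RATE / HNS_LOW_LATENCY_RATE.
 */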

/**
 * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
 * @new_param: new value
 * @old_param: old value
 **/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
	u32 gap = (new_param > old_param) ? new_param - old_param
					  : old_param - new_param;

	if (gap > 8)
		gap >>= 3;

	if (new_param > old_param)
		return old_param + gap;
	else
		return old_param - gap;
}
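
/* Worked example for smooth_alg(): old_param = 4, new_param = 20 gives
 * gap = 16, which is > 8 and so becomes 16 >> 3 = 2; the result is
 * 4 + 2 = 6, i.e. the parameter moves only about 1/8th of a large jump per
 * adjustment, damping oscillation between latency settings.
 */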

/**
 * hns_nic_adpt_coalesce - self-adapting coalesce according to rx rate
 * @ring_data: pointer to hns_nic_ring_data
 **/
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct hnae_handle *handle = ring->q->handle;
	u32 new_coal_param, old_coal_param = ring->coal_param;

	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
	else
		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;

	if (new_coal_param == old_coal_param &&
	    new_coal_param == handle->coal_param)
		return;

	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
	ring->coal_param = new_coal_param;

	/* Because all rings in one port share one coalesce param, a ring
	 * that calculates its own coalesce param cannot write it to the
	 * hardware immediately. It may do so only under one of three
	 * conditions:
	 * 1. the current ring's coalesce param is larger than the hardware's,
	 * 2. the ring which adapted last time is changing again, or
	 * 3. a timeout has elapsed.
	 */
	if (new_coal_param == handle->coal_param) {
		handle->coal_last_jiffies = jiffies;
		handle->coal_ring_idx = ring_data->queue_index;
	} else if (new_coal_param > handle->coal_param ||
		   handle->coal_ring_idx == ring_data->queue_index ||
		   time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
		handle->dev->ops->set_coalesce_usecs(handle,
						     new_coal_param);
		handle->dev->ops->set_coalesce_frames(handle,
						      1, new_coal_param);
		handle->coal_param = new_coal_param;
		handle->coal_ring_idx = ring_data->queue_index;
		handle->coal_last_jiffies = jiffies;
	}
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the erroneous packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
			ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all consumed buffers are refilled before we return */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}
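
/* NAPI note (illustrative): hns_nic_rx_poll_one() returns the number of
 * packets it consumed. With budget = 64, a return of 64 means
 * hns_nic_common_poll() below skips the fini_process() path and keeps
 * polling; any smaller return allows napi_complete() and IRQ re-arm.
 */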

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;
	bool rx_stopped;

	hns_update_rx_rate(ring);

	/* workaround for a hardware bug */
	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		rx_stopped = true;
	} else {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		rx_stopped = false;
	}

	return rx_stopped;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	hns_update_rx_rate(ring);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		return true;
	}

	return false;
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
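
/* Worked example for the wraparound check above: with desc_num = 1024,
 * next_to_clean c = 1000 and next_to_use u = 50 (the ring has wrapped), a
 * reported head h is valid if h > 1000 or h <= 50, hence the
 * "h > c || h <= u" branch taken when u <= c.
 */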

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return 0; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}
	/* update tx ring statistics. */
	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	head = ring->next_to_use; /* ntu: ring position set by software */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

	clean_complete += ring_data->poll_one(
		ring_data, budget - clean_complete,
		ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			return budget;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the network mode from the phy state or new param
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	/* If there is no phy, there is no need to adjust the link */
	if (ndev->phydev) {
		/* When the phy link is down, do nothing */
		if (ndev->phydev->link == 0)
			return;

		if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
						  ndev->phydev->duplex)) {
			/* because the Hi161X chip cannot change gmac speed
			 * and duplex while carrying traffic, delay 200ms to
			 * make sure there is no more data in the chip FIFO.
			 */
			netif_carrier_off(ndev);
			msleep(200);
			h->dev->ops->adjust_link(h, ndev->phydev->speed,
						 ndev->phydev->duplex);
			netif_carrier_on(ndev);
		}
	}

	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
	linkmode_copy(phy_dev->advertising, phy_dev->supported);

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_attached_info(phy_dev);

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	eth_hw_addr_set(ndev, mac_addr->sa_data);

	return 0;
}

static void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set the mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (device_get_ethdev_address(priv->dev, ndev)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* The irq balance differs between 16-core and 32-core systems.
	 * The cpu mask is chosen by ring index, according to the ring flag
	 * which indicates whether the ring is a tx or an rx ring.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}
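
/* Worked example for the mapping above (tx rings occupy indices
 * 0..q_num-1 and rx rings q_num..2*q_num-1, as implied by the
 * "ring_idx - q_num" term): with q_num = 16 on a 16-core system, tx ring 3
 * and rx ring 19 both land on cpu 3; on a 32-core system
 * (q_num != num_possible_cpus()), tx ring 3 goes to cpu 6 and rx ring 19
 * to cpu 7, interleaving tx on even and rx on odd cpus.
 */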

static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
	int i;

	for (i = 0; i < q_num * 2; i++) {
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
			priv->ring_data[i].ring->irq_init_flag =
				RCB_IRQ_NOT_INITED;
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			goto out_free_irq;
		}

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;

out_free_irq:
	hns_nic_free_irq(h->q_num, priv);
	return ret;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	if (!test_bit(NIC_STATE_DOWN, &priv->state))
		return 0;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	hns_nic_free_irq(h->q_num, priv);
	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	enum hnae_port_type type = priv->ae_handle->port_type;

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);

	/* Only do hns_nic_net_reset in debug mode
	 * because of hardware limitation.
	 */
	if (type == HNAE_PORT_DEBUG)
		hns_nic_net_reset(netdev);

	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
		ndev->watchdog_timeo *= 2;
		netdev_info(ndev, "watchdog_timo changed to %d.\n",
			    ndev->watchdog_timeo);
	} else {
		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
		hns_tx_timeout_reset(priv);
	}
}

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

#define HNS_LB_TX_RING 0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be a tcp/ip packet */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}
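
/* Illustrative layout of the 64-byte test frame built above: the whole
 * buffer is first filled with 0xFF, which makes the destination MAC the
 * broadcast address; the EtherType is then set to ETH_P_IP and bytes
 * 32..62 are overwritten with the 0xAA pattern, leaving the final byte
 * 0xFF. The frame is queued on tx ring HNS_LB_TX_RING (ring 0) while
 * serdes loopback is active, so it comes straight back on an rx ring.
 */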

static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* adjust the link with a fixed speed and full duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait for h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}

/**
 * hns_nic_clear_all_rx_fetch - clear the descriptors the chip has already
 * fetched. The function works as follows:
 * 1. if any rx ring finds a page_offset that is not 0 between head and
 *    tail, the chip fetched the wrong descs for that ring while the
 *    buffer size was 4096.
 * 2. enable the chip serdes loopback and point the rss indirection at
 *    that ring.
 * 3. construct 64-byte ip broadcast packets and wait until the associated
 *    rx ring has received them all, so it fetches fresh descriptors.
 * 4. recover the original state.
 *
 * @ndev: net device
 */
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* check every rx ring and clear its fetched descs if needed */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb) {
					ret = -ENOMEM;
					goto out;
				}
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean the packet sent on tx ring 0 */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}
1705 | |
1706 | static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu) |
1707 | { |
1708 | struct hns_nic_priv *priv = netdev_priv(dev: ndev); |
1709 | struct hnae_handle *h = priv->ae_handle; |
1710 | bool if_running = netif_running(dev: ndev); |
1711 | int ret; |
1712 | |
1713 | /* MTU < 68 is an error and causes problems on some kernels */ |
1714 | if (new_mtu < 68) |
1715 | return -EINVAL; |
1716 | |
1717 | /* MTU no change */ |
1718 | if (new_mtu == ndev->mtu) |
1719 | return 0; |
1720 | |
1721 | if (!h->dev->ops->set_mtu) |
1722 | return -ENOTSUPP; |
1723 | |
1724 | if (if_running) { |
1725 | (void)hns_nic_net_stop(ndev); |
		msleep(100);
1727 | } |
1728 | |
1729 | if (priv->enet_ver != AE_VERSION_1 && |
1730 | ndev->mtu <= BD_SIZE_2048_MAX_MTU && |
1731 | new_mtu > BD_SIZE_2048_MAX_MTU) { |
1732 | /* update desc */ |
1733 | hnae_reinit_all_ring_desc(h); |
1734 | |
		/* drop the packets the chip has already fetched */
1736 | ret = hns_nic_clear_all_rx_fetch(ndev); |
1737 | |
		/* the page offset must be consistent with the desc */
1739 | hnae_reinit_all_ring_page_off(h); |
1740 | |
1741 | if (ret) { |
			netdev_err(ndev, "clear the fetched desc fail\n");
1743 | goto out; |
1744 | } |
1745 | } |
1746 | |
1747 | ret = h->dev->ops->set_mtu(h, new_mtu); |
1748 | if (ret) { |
		netdev_err(ndev, "set mtu fail, return value %d\n", ret);
1751 | goto out; |
1752 | } |
1753 | |
1754 | /* finally, set new mtu to netdevice */ |
1755 | ndev->mtu = new_mtu; |
1756 | |
1757 | out: |
1758 | if (if_running) { |
1759 | if (hns_nic_net_open(ndev)) { |
			netdev_err(ndev, "hns net open fail\n");
1761 | ret = -EINVAL; |
1762 | } |
1763 | } |
1764 | |
1765 | return ret; |
1766 | } |
1767 | |
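/* ndo_set_features: on enet v2, TSO can be toggled at runtime by swapping
 * the descriptor fill and queue-stop callbacks; enet v1 has no TSO support.
 */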
1768 | static int hns_nic_set_features(struct net_device *netdev, |
1769 | netdev_features_t features) |
1770 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
1772 | |
1773 | switch (priv->enet_ver) { |
1774 | case AE_VERSION_1: |
1775 | if (features & (NETIF_F_TSO | NETIF_F_TSO6)) |
			netdev_info(netdev, "enet v1 does not support tso!\n");
1777 | break; |
1778 | default: |
1779 | if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { |
1780 | priv->ops.fill_desc = fill_tso_desc; |
1781 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; |
			/* the chip only supports 7 * 4096 bytes of TSO payload */
			netif_set_tso_max_size(netdev, 7 * 4096);
1784 | } else { |
1785 | priv->ops.fill_desc = fill_v2_desc; |
1786 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; |
1787 | } |
1788 | break; |
1789 | } |
1790 | netdev->features = features; |
1791 | return 0; |
1792 | } |
1793 | |
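/* ndo_fix_features: enet v1 hardware can do neither TSO nor VLAN filtering,
 * so mask those feature bits off before they are committed.
 */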
1794 | static netdev_features_t hns_nic_fix_features( |
1795 | struct net_device *netdev, netdev_features_t features) |
1796 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
1798 | |
1799 | switch (priv->enet_ver) { |
1800 | case AE_VERSION_1: |
1801 | features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | |
1802 | NETIF_F_HW_VLAN_CTAG_FILTER); |
1803 | break; |
1804 | default: |
1805 | break; |
1806 | } |
1807 | return features; |
1808 | } |
1809 | |
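/* __dev_uc_sync()/__dev_uc_unsync() callbacks: forward unicast address adds
 * and removals to the AE when it implements them.
 */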
1810 | static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr) |
1811 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
1813 | struct hnae_handle *h = priv->ae_handle; |
1814 | |
1815 | if (h->dev->ops->add_uc_addr) |
1816 | return h->dev->ops->add_uc_addr(h, addr); |
1817 | |
1818 | return 0; |
1819 | } |
1820 | |
1821 | static int hns_nic_uc_unsync(struct net_device *netdev, |
1822 | const unsigned char *addr) |
1823 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
1825 | struct hnae_handle *h = priv->ae_handle; |
1826 | |
1827 | if (h->dev->ops->rm_uc_addr) |
1828 | return h->dev->ops->rm_uc_addr(h, addr); |
1829 | |
1830 | return 0; |
1831 | } |
1832 | |
1833 | /** |
 * hns_set_multicast_list - set multicast mac addresses
1835 | * @ndev: net device |
1836 | * |
1837 | * return void |
1838 | */ |
1839 | static void hns_set_multicast_list(struct net_device *ndev) |
1840 | { |
	struct hns_nic_priv *priv = netdev_priv(ndev);
1842 | struct hnae_handle *h = priv->ae_handle; |
1843 | struct netdev_hw_addr *ha = NULL; |
1844 | |
1845 | if (!h) { |
		netdev_err(ndev, "hnae handle is null\n");
1847 | return; |
1848 | } |
1849 | |
1850 | if (h->dev->ops->clr_mc_addr) |
1851 | if (h->dev->ops->clr_mc_addr(h)) |
			netdev_err(ndev, "clear multicast address fail\n");
1853 | |
1854 | if (h->dev->ops->set_mc_addr) { |
1855 | netdev_for_each_mc_addr(ha, ndev) |
1856 | if (h->dev->ops->set_mc_addr(h, ha->addr)) |
				netdev_err(ndev, "set multicast fail\n");
1858 | } |
1859 | } |
1860 | |
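/* ndo_set_rx_mode: push promiscuous mode, the multicast list and the unicast
 * list down to the hardware whenever the rx mode changes.
 */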
1861 | static void hns_nic_set_rx_mode(struct net_device *ndev) |
1862 | { |
	struct hns_nic_priv *priv = netdev_priv(ndev);
1864 | struct hnae_handle *h = priv->ae_handle; |
1865 | |
1866 | if (h->dev->ops->set_promisc_mode) { |
1867 | if (ndev->flags & IFF_PROMISC) |
1868 | h->dev->ops->set_promisc_mode(h, 1); |
1869 | else |
1870 | h->dev->ops->set_promisc_mode(h, 0); |
1871 | } |
1872 | |
1873 | hns_set_multicast_list(ndev); |
1874 | |
	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
1877 | } |
1878 | |
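/* ndo_get_stats64: sum the per-ring byte and packet counters and take the
 * error counters straight from the netdev stats.
 */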
1879 | static void hns_nic_get_stats64(struct net_device *ndev, |
1880 | struct rtnl_link_stats64 *stats) |
1881 | { |
1882 | int idx; |
1883 | u64 tx_bytes = 0; |
1884 | u64 rx_bytes = 0; |
1885 | u64 tx_pkts = 0; |
1886 | u64 rx_pkts = 0; |
	struct hns_nic_priv *priv = netdev_priv(ndev);
1888 | struct hnae_handle *h = priv->ae_handle; |
1889 | |
1890 | for (idx = 0; idx < h->q_num; idx++) { |
1891 | tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes; |
1892 | tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts; |
1893 | rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes; |
1894 | rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts; |
1895 | } |
1896 | |
1897 | stats->tx_bytes = tx_bytes; |
1898 | stats->tx_packets = tx_pkts; |
1899 | stats->rx_bytes = rx_bytes; |
1900 | stats->rx_packets = rx_pkts; |
1901 | |
1902 | stats->rx_errors = ndev->stats.rx_errors; |
1903 | stats->multicast = ndev->stats.multicast; |
1904 | stats->rx_length_errors = ndev->stats.rx_length_errors; |
1905 | stats->rx_crc_errors = ndev->stats.rx_crc_errors; |
1906 | stats->rx_missed_errors = ndev->stats.rx_missed_errors; |
1907 | |
1908 | stats->tx_errors = ndev->stats.tx_errors; |
1909 | stats->rx_dropped = ndev->stats.rx_dropped; |
1910 | stats->tx_dropped = ndev->stats.tx_dropped; |
1911 | stats->collisions = ndev->stats.collisions; |
1912 | stats->rx_over_errors = ndev->stats.rx_over_errors; |
1913 | stats->rx_frame_errors = ndev->stats.rx_frame_errors; |
1914 | stats->rx_fifo_errors = ndev->stats.rx_fifo_errors; |
1915 | stats->tx_aborted_errors = ndev->stats.tx_aborted_errors; |
1916 | stats->tx_carrier_errors = ndev->stats.tx_carrier_errors; |
1917 | stats->tx_fifo_errors = ndev->stats.tx_fifo_errors; |
1918 | stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors; |
1919 | stats->tx_window_errors = ndev->stats.tx_window_errors; |
1920 | stats->rx_compressed = ndev->stats.rx_compressed; |
1921 | stats->tx_compressed = ndev->stats.tx_compressed; |
1922 | } |
1923 | |
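/* ndo_select_queue: on enet v2, broadcast/multicast frames are pinned to
 * queue 0 to work around hardware queue loopback; everything else uses the
 * default tx queue selection.
 */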
1924 | static u16 |
1925 | hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, |
1926 | struct net_device *sb_dev) |
1927 | { |
1928 | struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; |
	struct hns_nic_priv *priv = netdev_priv(ndev);
1930 | |
	/* send broadcast/multicast via queue 0 to avoid hardware loopback */
1932 | if (!AE_IS_VER1(priv->enet_ver) && |
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return netdev_pick_tx(ndev, skb, NULL);
1937 | } |
1938 | |
1939 | static const struct net_device_ops hns_nic_netdev_ops = { |
1940 | .ndo_open = hns_nic_net_open, |
1941 | .ndo_stop = hns_nic_net_stop, |
1942 | .ndo_start_xmit = hns_nic_net_xmit, |
1943 | .ndo_tx_timeout = hns_nic_net_timeout, |
1944 | .ndo_set_mac_address = hns_nic_net_set_mac_address, |
1945 | .ndo_change_mtu = hns_nic_change_mtu, |
1946 | .ndo_eth_ioctl = phy_do_ioctl_running, |
1947 | .ndo_set_features = hns_nic_set_features, |
1948 | .ndo_fix_features = hns_nic_fix_features, |
1949 | .ndo_get_stats64 = hns_nic_get_stats64, |
1950 | .ndo_set_rx_mode = hns_nic_set_rx_mode, |
1951 | .ndo_select_queue = hns_nic_select_queue, |
1952 | }; |
1953 | |
1954 | static void hns_nic_update_link_status(struct net_device *netdev) |
1955 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;
1959 | |
1960 | if (h->phy_dev) { |
1961 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) |
1962 | return; |
1963 | |
		(void)genphy_read_status(h->phy_dev);
1965 | } |
	hns_nic_adjust_link(netdev);
1967 | } |
1968 | |
/* for dumping key regs */
1970 | static void hns_nic_dump(struct hns_nic_priv *priv) |
1971 | { |
1972 | struct hnae_handle *h = priv->ae_handle; |
1973 | struct hnae_ae_ops *ops = h->dev->ops; |
1974 | u32 *data, reg_num, i; |
1975 | |
1976 | if (ops->get_regs_len && ops->get_regs) { |
1977 | reg_num = ops->get_regs_len(priv->ae_handle); |
1978 | reg_num = (reg_num + 3ul) & ~3ul; |
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
1980 | if (data) { |
1981 | ops->get_regs(priv->ae_handle, data); |
1982 | for (i = 0; i < reg_num; i += 4) |
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1984 | i, data[i], data[i + 1], |
1985 | data[i + 2], data[i + 3]); |
			kfree(data);
1987 | } |
1988 | } |
1989 | |
1990 | for (i = 0; i < h->q_num; i++) { |
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
1999 | } |
2000 | } |
2001 | |
2002 | /* for resetting subtask */ |
2003 | static void hns_nic_reset_subtask(struct hns_nic_priv *priv) |
2004 | { |
2005 | enum hnae_port_type type = priv->ae_handle->port_type; |
2006 | |
2007 | if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state)) |
2008 | return; |
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2010 | |
2011 | /* If we're already down, removing or resetting, just bail */ |
2012 | if (test_bit(NIC_STATE_DOWN, &priv->state) || |
2013 | test_bit(NIC_STATE_REMOVING, &priv->state) || |
2014 | test_bit(NIC_STATE_RESETTING, &priv->state)) |
2015 | return; |
2016 | |
2017 | hns_nic_dump(priv); |
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));
2020 | |
2021 | rtnl_lock(); |
2022 | /* put off any impending NetWatchDogTimeout */ |
	netif_trans_update(priv->netdev);
	hns_nic_net_reinit(priv->netdev);
2025 | |
2026 | rtnl_unlock(); |
2027 | } |
2028 | |
/* for marking the service task complete */
2030 | static void hns_nic_service_event_complete(struct hns_nic_priv *priv) |
2031 | { |
2032 | WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); |
	/* make sure all prior state changes are visible before clearing the bit */
2034 | smp_mb__before_atomic(); |
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2036 | } |
2037 | |
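/* service task: run the reset subtask, refresh link state, LED status and
 * statistics, then clear the scheduled bit so the task can be queued again.
 */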
2038 | static void hns_nic_service_task(struct work_struct *work) |
2039 | { |
2040 | struct hns_nic_priv *priv |
2041 | = container_of(work, struct hns_nic_priv, service_task); |
2042 | struct hnae_handle *h = priv->ae_handle; |
2043 | |
2044 | hns_nic_reset_subtask(priv); |
	hns_nic_update_link_status(priv->netdev);
2046 | h->dev->ops->update_led_status(h); |
	hns_nic_update_stats(priv->netdev);
2048 | |
2049 | hns_nic_service_event_complete(priv); |
2050 | } |
2051 | |
2052 | static void hns_nic_task_schedule(struct hns_nic_priv *priv) |
2053 | { |
2054 | if (!test_bit(NIC_STATE_DOWN, &priv->state) && |
2055 | !test_bit(NIC_STATE_REMOVING, &priv->state) && |
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
2058 | } |
2059 | |
2060 | static void hns_nic_service_timer(struct timer_list *t) |
2061 | { |
2062 | struct hns_nic_priv *priv = from_timer(priv, t, service_timer); |
2063 | |
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
2065 | |
2066 | hns_nic_task_schedule(priv); |
2067 | } |
2068 | |
2069 | /** |
2070 | * hns_tx_timeout_reset - initiate reset due to Tx timeout |
2071 | * @priv: driver private struct |
2072 | **/ |
2073 | static void hns_tx_timeout_reset(struct hns_nic_priv *priv) |
2074 | { |
2075 | /* Do the reset outside of interrupt context */ |
2076 | if (!test_bit(NIC_STATE_DOWN, &priv->state)) { |
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
2081 | priv->tx_timeout_count++; |
2082 | hns_nic_task_schedule(priv); |
2083 | } |
2084 | } |
2085 | |
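/* ring_data is one array with 2 * q_num entries: tx ring i at index i, its
 * rx counterpart at index q_num + i; each entry gets its own napi context.
 */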
2086 | static int hns_nic_init_ring_data(struct hns_nic_priv *priv) |
2087 | { |
2088 | struct hnae_handle *h = priv->ae_handle; |
2089 | struct hns_nic_ring_data *rd; |
2090 | bool is_ver1 = AE_IS_VER1(priv->enet_ver); |
2091 | int i; |
2092 | |
2093 | if (h->q_num > NIC_MAX_Q_PER_VF) { |
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
2095 | return -EINVAL; |
2096 | } |
2097 | |
2098 | priv->ring_data = kzalloc(array3_size(h->q_num, |
2099 | sizeof(*priv->ring_data), 2), |
2100 | GFP_KERNEL); |
2101 | if (!priv->ring_data) |
2102 | return -ENOMEM; |
2103 | |
2104 | for (i = 0; i < h->q_num; i++) { |
2105 | rd = &priv->ring_data[i]; |
2106 | rd->queue_index = i; |
2107 | rd->ring = &h->qs[i]->tx_ring; |
2108 | rd->poll_one = hns_nic_tx_poll_one; |
2109 | rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : |
2110 | hns_nic_tx_fini_pro_v2; |
2111 | |
		netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
2113 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
2114 | } |
2115 | for (i = h->q_num; i < h->q_num * 2; i++) { |
2116 | rd = &priv->ring_data[i]; |
2117 | rd->queue_index = i - h->q_num; |
2118 | rd->ring = &h->qs[i - h->q_num]->rx_ring; |
2119 | rd->poll_one = hns_nic_rx_poll_one; |
2120 | rd->ex_process = hns_nic_rx_up_pro; |
2121 | rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : |
2122 | hns_nic_rx_fini_pro_v2; |
2123 | |
		netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
2125 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
2126 | } |
2127 | |
2128 | return 0; |
2129 | } |
2130 | |
2131 | static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) |
2132 | { |
2133 | struct hnae_handle *h = priv->ae_handle; |
2134 | int i; |
2135 | |
2136 | for (i = 0; i < h->q_num * 2; i++) { |
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq, NULL);
2142 | free_irq(priv->ring_data[i].ring->irq, |
2143 | &priv->ring_data[i]); |
2144 | } |
2145 | |
2146 | priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
2147 | } |
	kfree(priv->ring_data);
2149 | } |
2150 | |
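/* choose the descriptor fill, queue-stop and rx-bnum callbacks that match
 * the enet version and the currently enabled TSO features.
 */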
2151 | static void hns_nic_set_priv_ops(struct net_device *netdev) |
2152 | { |
	struct hns_nic_priv *priv = netdev_priv(netdev);
2154 | struct hnae_handle *h = priv->ae_handle; |
2155 | |
2156 | if (AE_IS_VER1(priv->enet_ver)) { |
2157 | priv->ops.fill_desc = fill_desc; |
2158 | priv->ops.get_rxd_bnum = get_rx_desc_bnum; |
2159 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; |
2160 | } else { |
2161 | priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; |
2162 | if ((netdev->features & NETIF_F_TSO) || |
2163 | (netdev->features & NETIF_F_TSO6)) { |
2164 | priv->ops.fill_desc = fill_tso_desc; |
2165 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; |
			/* the chip only supports 7 * 4096 bytes of TSO payload */
			netif_set_tso_max_size(netdev, 7 * 4096);
2168 | } else { |
2169 | priv->ops.fill_desc = fill_v2_desc; |
2170 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; |
2171 | } |
		/* enable tso when initializing; tso is then switched on/off
		 * per packet via the TSE bit in the bd
		 */
2175 | h->dev->ops->set_tso_stats(h, 1); |
2176 | } |
2177 | } |
2178 | |
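/* try to claim an AE handle and register the netdev on top of it; returns
 * -ENODEV when the AE has not registered yet, in which case the caller falls
 * back to the hnae notifier (see hns_nic_notifier_action()).
 */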
2179 | static int hns_nic_try_get_ae(struct net_device *ndev) |
2180 | { |
	struct hns_nic_priv *priv = netdev_priv(ndev);
2182 | struct hnae_handle *h; |
2183 | int ret; |
2184 | |
	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "no handle yet, register notifier!\n");
2190 | goto out; |
2191 | } |
2192 | priv->ae_handle = h; |
2193 | |
2194 | ret = hns_nic_init_phy(ndev, h); |
2195 | if (ret) { |
		dev_err(priv->dev, "probe phy device fail!\n");
2197 | goto out_init_phy; |
2198 | } |
2199 | |
2200 | ret = hns_nic_init_ring_data(priv); |
2201 | if (ret) { |
2202 | ret = -ENOMEM; |
2203 | goto out_init_ring_data; |
2204 | } |
2205 | |
	hns_nic_set_priv_ops(ndev);
2207 | |
	ret = register_netdev(ndev);
2209 | if (ret) { |
		dev_err(priv->dev, "probe register netdev fail!\n");
2211 | goto out_reg_ndev_fail; |
2212 | } |
2213 | return 0; |
2214 | |
2215 | out_reg_ndev_fail: |
2216 | hns_nic_uninit_ring_data(priv); |
2217 | priv->ring_data = NULL; |
2218 | out_init_phy: |
2219 | out_init_ring_data: |
	hnae_put_handle(priv->ae_handle);
2221 | priv->ae_handle = NULL; |
2222 | out: |
2223 | return ret; |
2224 | } |
2225 | |
2226 | static int hns_nic_notifier_action(struct notifier_block *nb, |
2227 | unsigned long action, void *data) |
2228 | { |
2229 | struct hns_nic_priv *priv = |
2230 | container_of(nb, struct hns_nic_priv, notifier_block); |
2231 | |
2232 | assert(action == HNAE_AE_REGISTER); |
2233 | |
	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
2236 | priv->notifier_block.notifier_call = NULL; |
2237 | } |
2238 | return 0; |
2239 | } |
2240 | |
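/* probe: read the enet version and the AE reference/port index from DT or
 * ACPI, set up the netdev features and MTU limits for that version, then
 * attach to the AE (or wait for it via the notifier).
 */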
2241 | static int hns_nic_dev_probe(struct platform_device *pdev) |
2242 | { |
2243 | struct device *dev = &pdev->dev; |
2244 | struct net_device *ndev; |
2245 | struct hns_nic_priv *priv; |
2246 | u32 port_id; |
2247 | int ret; |
2248 | |
2249 | ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF); |
2250 | if (!ndev) |
2251 | return -ENOMEM; |
2252 | |
	platform_set_drvdata(pdev, ndev);
2254 | |
	priv = netdev_priv(ndev);
2256 | priv->dev = dev; |
2257 | priv->netdev = ndev; |
2258 | |
2259 | if (dev_of_node(dev)) { |
2260 | struct device_node *ae_node; |
2261 | |
		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
2264 | priv->enet_ver = AE_VERSION_1; |
2265 | else |
2266 | priv->enet_ver = AE_VERSION_2; |
2267 | |
		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
2269 | if (!ae_node) { |
2270 | ret = -ENODEV; |
			dev_err(dev, "cannot find ae-handle\n");
2272 | goto out_read_prop_fail; |
2273 | } |
2274 | priv->fwnode = &ae_node->fwnode; |
	} else if (is_acpi_node(dev->fwnode)) {
2276 | struct fwnode_reference_args args; |
2277 | |
		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
2282 | else { |
2283 | ret = -ENXIO; |
2284 | goto out_read_prop_fail; |
2285 | } |
2286 | |
2287 | /* try to find port-idx-in-ae first */ |
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
2290 | if (ret) { |
			dev_err(dev, "cannot find ae-handle\n");
2292 | goto out_read_prop_fail; |
2293 | } |
		if (!is_acpi_device_node(args.fwnode)) {
2295 | ret = -EINVAL; |
2296 | goto out_read_prop_fail; |
2297 | } |
2298 | priv->fwnode = args.fwnode; |
2299 | } else { |
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
2301 | ret = -ENXIO; |
2302 | goto out_read_prop_fail; |
2303 | } |
2304 | |
	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
2306 | if (ret) { |
		/* only for compatibility with old code */
		ret = device_property_read_u32(dev, "port-id", &port_id);
2309 | if (ret) |
2310 | goto out_read_prop_fail; |
		/* for old dts, we need to calculate the port offset */
2312 | port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET |
2313 | : port_id - HNS_SRV_OFFSET; |
2314 | } |
2315 | priv->port_id = port_id; |
2316 | |
2317 | hns_init_mac_addr(ndev); |
2318 | |
2319 | ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT; |
2320 | ndev->priv_flags |= IFF_UNICAST_FLT; |
2321 | ndev->netdev_ops = &hns_nic_netdev_ops; |
2322 | hns_ethtool_set_ops(ndev); |
2323 | |
2324 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
2325 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
2326 | NETIF_F_GRO; |
2327 | ndev->vlan_features |= |
2328 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; |
2329 | ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; |
2330 | |
2331 | /* MTU range: 68 - 9578 (v1) or 9706 (v2) */ |
2332 | ndev->min_mtu = MAC_MIN_MTU; |
2333 | switch (priv->enet_ver) { |
2334 | case AE_VERSION_2: |
2335 | ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE; |
2336 | ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
2337 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
2338 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; |
2339 | ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6; |
2340 | ndev->max_mtu = MAC_MAX_MTU_V2 - |
2341 | (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); |
2342 | break; |
2343 | default: |
2344 | ndev->max_mtu = MAC_MAX_MTU - |
2345 | (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); |
2346 | break; |
2347 | } |
2348 | |
2349 | SET_NETDEV_DEV(ndev, dev); |
2350 | |
2351 | if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) |
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");
2355 | |
2356 | /* carrier off reporting is important to ethtool even BEFORE open */ |
	netif_carrier_off(ndev);
2358 | |
2359 | timer_setup(&priv->service_timer, hns_nic_service_timer, 0); |
2360 | INIT_WORK(&priv->service_task, hns_nic_service_task); |
2361 | |
	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);
2365 | |
	if (hns_nic_try_get_ae(priv->netdev)) {
2367 | priv->notifier_block.notifier_call = hns_nic_notifier_action; |
		ret = hnae_register_notifier(&priv->notifier_block);
2369 | if (ret) { |
			dev_err(dev, "register notifier fail!\n");
2371 | goto out_notify_fail; |
2372 | } |
		dev_dbg(dev, "no handle yet, register notifier!\n");
2374 | } |
2375 | |
2376 | return 0; |
2377 | |
2378 | out_notify_fail: |
	(void)cancel_work_sync(&priv->service_task);
2380 | out_read_prop_fail: |
2381 | /* safe for ACPI FW */ |
2382 | of_node_put(to_of_node(priv->fwnode)); |
	free_netdev(ndev);
2384 | return ret; |
2385 | } |
2386 | |
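/* remove: tear down in reverse probe order; NIC_STATE_REMOVING keeps the
 * service task from being rescheduled while it is cancelled.
 */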
2387 | static void hns_nic_dev_remove(struct platform_device *pdev) |
2388 | { |
2389 | struct net_device *ndev = platform_get_drvdata(pdev); |
	struct hns_nic_priv *priv = netdev_priv(ndev);
2391 | |
2392 | if (ndev->reg_state != NETREG_UNINITIALIZED) |
		unregister_netdev(ndev);
2394 | |
2395 | if (priv->ring_data) |
2396 | hns_nic_uninit_ring_data(priv); |
2397 | priv->ring_data = NULL; |
2398 | |
2399 | if (ndev->phydev) |
		phy_disconnect(ndev->phydev);
2401 | |
	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
2404 | priv->ae_handle = NULL; |
2405 | if (priv->notifier_block.notifier_call) |
		hnae_unregister_notifier(&priv->notifier_block);
2407 | priv->notifier_block.notifier_call = NULL; |
2408 | |
	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);
2411 | |
2412 | /* safe for ACPI FW */ |
2413 | of_node_put(to_of_node(priv->fwnode)); |
2414 | |
	free_netdev(ndev);
2416 | } |
2417 | |
2418 | static const struct of_device_id hns_enet_of_match[] = { |
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
2421 | {}, |
2422 | }; |
2423 | |
2424 | MODULE_DEVICE_TABLE(of, hns_enet_of_match); |
2425 | |
2426 | static struct platform_driver hns_nic_dev_driver = { |
2427 | .driver = { |
		.name = "hns-nic",
2429 | .of_match_table = hns_enet_of_match, |
2430 | .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), |
2431 | }, |
2432 | .probe = hns_nic_dev_probe, |
2433 | .remove_new = hns_nic_dev_remove, |
2434 | }; |
2435 | |
2436 | module_platform_driver(hns_nic_dev_driver); |
2437 | |
MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");
2442 | |