1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/dma-mapping.h>
5#include <linux/etherdevice.h>
6#include <linux/interrupt.h>
7#include <linux/if_vlan.h>
8#include <linux/ip.h>
9#include <linux/ipv6.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/aer.h>
13#include <linux/skbuff.h>
14#include <linux/sctp.h>
15#include <linux/vermagic.h>
16#include <net/gre.h>
17#include <net/pkt_cls.h>
18#include <net/tcp.h>
19#include <net/vxlan.h>
20
21#include "hnae3.h"
22#include "hns3_enet.h"
23
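/* OR @val, shifted by @shift, into @origin. Note that nothing is cleared
 * first, so callers are expected to start from a zero-initialized field.
 */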
24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
25
26static void hns3_clear_all_ring(struct hnae3_handle *h);
27static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
28static void hns3_remove_hw_addr(struct net_device *netdev);
29
30static const char hns3_driver_name[] = "hns3";
31const char hns3_driver_version[] = VERMAGIC_STRING;
32static const char hns3_driver_string[] =
33 "Hisilicon Ethernet Network Driver for Hip08 Family";
34static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
35static struct hnae3_client client;
36
37/* hns3_pci_tbl - PCI Device ID Table
38 *
39 * Last entry must be all 0s
40 *
41 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
42 * Class, Class Mask, private data (not used) }
43 */
44static const struct pci_device_id hns3_pci_tbl[] = {
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
56 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
59 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
60 /* required last entry */
61 {0, }
62};
63MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
64
65static irqreturn_t hns3_irq_handle(int irq, void *vector)
66{
67 struct hns3_enet_tqp_vector *tqp_vector = vector;
68
69 napi_schedule(&tqp_vector->napi);
70
71 return IRQ_HANDLED;
72}
73
/* This callback is invoked by the IRQ core to record affinity changes in the
 * vector's affinity mask when irq_set_affinity_notifier() is used.
 */
77static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
78 const cpumask_t *mask)
79{
80 struct hns3_enet_tqp_vector *tqp_vectors =
81 container_of(notify, struct hns3_enet_tqp_vector,
82 affinity_notify);
83
84 tqp_vectors->affinity_mask = *mask;
85}
86
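/* irq_set_affinity_notifier() requires a release callback for the kref
 * embedded in the notifier. Nothing has to be freed here because the
 * notifier is embedded in struct hns3_enet_tqp_vector.
 */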
87static void hns3_nic_irq_affinity_release(struct kref *ref)
88{
89}
90
91static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
92{
93 struct hns3_enet_tqp_vector *tqp_vectors;
94 unsigned int i;
95
96 for (i = 0; i < priv->vector_num; i++) {
97 tqp_vectors = &priv->tqp_vector[i];
98
99 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
100 continue;
101
102 /* clear the affinity notifier and affinity mask */
103 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
104 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
105
106 /* release the irq resource */
107 free_irq(tqp_vectors->vector_irq, tqp_vectors);
108 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
109 }
110}
111
112static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
113{
114 struct hns3_enet_tqp_vector *tqp_vectors;
115 int txrx_int_idx = 0;
116 int rx_int_idx = 0;
117 int tx_int_idx = 0;
118 unsigned int i;
119 int ret;
120
121 for (i = 0; i < priv->vector_num; i++) {
122 tqp_vectors = &priv->tqp_vector[i];
123
124 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
125 continue;
126
127 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
128 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
129 "%s-%s-%d", priv->netdev->name, "TxRx",
130 txrx_int_idx++);
132 } else if (tqp_vectors->rx_group.ring) {
133 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
134 "%s-%s-%d", priv->netdev->name, "Rx",
135 rx_int_idx++);
136 } else if (tqp_vectors->tx_group.ring) {
137 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
138 "%s-%s-%d", priv->netdev->name, "Tx",
139 tx_int_idx++);
140 } else {
141 /* Skip this unused q_vector */
142 continue;
143 }
144
145 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
146
147 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
148 tqp_vectors->name,
149 tqp_vectors);
150 if (ret) {
151 netdev_err(priv->netdev, "request irq(%d) fail\n",
152 tqp_vectors->vector_irq);
153 return ret;
154 }
155
156 tqp_vectors->affinity_notify.notify =
157 hns3_nic_irq_affinity_notify;
158 tqp_vectors->affinity_notify.release =
159 hns3_nic_irq_affinity_release;
160 irq_set_affinity_notifier(tqp_vectors->vector_irq,
161 &tqp_vectors->affinity_notify);
162 irq_set_affinity_hint(tqp_vectors->vector_irq,
163 &tqp_vectors->affinity_mask);
164
165 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
166 }
167
168 return 0;
169}
170
171static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
172 u32 mask_en)
173{
174 writel(mask_en, tqp_vector->mask_addr);
175}
176
177static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
178{
179 napi_enable(&tqp_vector->napi);
180
181 /* enable vector */
182 hns3_mask_vector_irq(tqp_vector, 1);
183}
184
185static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
186{
187 /* disable vector */
188 hns3_mask_vector_irq(tqp_vector, 0);
189
190 disable_irq(tqp_vector->vector_irq);
191 napi_disable(&tqp_vector->napi);
192}
193
194void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
195 u32 rl_value)
196{
197 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
198
 /* This defines the configuration for RL (Interrupt Rate Limiter).
 * RL limits the rate of interrupts, i.e. the number of interrupts per
 * second. GL and RL are the two ways to achieve interrupt coalescing.
 */
203
204 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
205 !tqp_vector->rx_group.coal.gl_adapt_enable)
206 /* According to the hardware, the range of rl_reg is
207 * 0-59 and the unit is 4.
208 */
209 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
210
211 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
212}
213
214void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
215 u32 gl_value)
216{
217 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
218
219 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
220}
221
222void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
223 u32 gl_value)
224{
225 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
226
227 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
228}
229
230static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
231 struct hns3_nic_priv *priv)
232{
233 /* initialize the configuration for interrupt coalescing.
234 * 1. GL (Interrupt Gap Limiter)
235 * 2. RL (Interrupt Rate Limiter)
236 */
237
238 /* Default: enable interrupt coalescing self-adaptive and GL */
239 tqp_vector->tx_group.coal.gl_adapt_enable = 1;
240 tqp_vector->rx_group.coal.gl_adapt_enable = 1;
241
242 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
243 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
244
245 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
246 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
247}
248
249static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
250 struct hns3_nic_priv *priv)
251{
252 struct hnae3_handle *h = priv->ae_handle;
253
254 hns3_set_vector_coalesce_tx_gl(tqp_vector,
255 tqp_vector->tx_group.coal.int_gl);
256 hns3_set_vector_coalesce_rx_gl(tqp_vector,
257 tqp_vector->rx_group.coal.int_gl);
258 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
259}
260
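/* Propagate the TC to queue mapping from the ae handle to the stack and set
 * the number of TX/RX queues actually used (rss_size * num_tc).
 */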
261static int hns3_nic_set_real_num_queue(struct net_device *netdev)
262{
263 struct hnae3_handle *h = hns3_get_handle(netdev);
264 struct hnae3_knic_private_info *kinfo = &h->kinfo;
265 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
266 int i, ret;
267
268 if (kinfo->num_tc <= 1) {
269 netdev_reset_tc(netdev);
270 } else {
271 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
272 if (ret) {
273 netdev_err(netdev,
274 "netdev_set_num_tc fail, ret=%d!\n", ret);
275 return ret;
276 }
277
278 for (i = 0; i < HNAE3_MAX_TC; i++) {
279 if (!kinfo->tc_info[i].enable)
280 continue;
281
282 netdev_set_tc_queue(netdev,
283 kinfo->tc_info[i].tc,
284 kinfo->tc_info[i].tqp_count,
285 kinfo->tc_info[i].tqp_offset);
286 }
287 }
288
289 ret = netif_set_real_num_tx_queues(netdev, queue_size);
290 if (ret) {
291 netdev_err(netdev,
292 "netif_set_real_num_tx_queues fail, ret=%d!\n",
293 ret);
294 return ret;
295 }
296
297 ret = netif_set_real_num_rx_queues(netdev, queue_size);
298 if (ret) {
299 netdev_err(netdev,
300 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
301 return ret;
302 }
303
304 return 0;
305}
306
307static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
308{
309 u16 alloc_tqps, max_rss_size, rss_size;
310
311 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
312 rss_size = alloc_tqps / h->kinfo.num_tc;
313
314 return min_t(u16, rss_size, max_rss_size);
315}
316
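/* Enable or disable a queue pair by toggling the ring enable bit in the
 * queue's RCB register.
 */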
317static void hns3_tqp_enable(struct hnae3_queue *tqp)
318{
319 u32 rcb_reg;
320
321 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
322 rcb_reg |= BIT(HNS3_RING_EN_B);
323 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
324}
325
326static void hns3_tqp_disable(struct hnae3_queue *tqp)
327{
328 u32 rcb_reg;
329
330 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
331 rcb_reg &= ~BIT(HNS3_RING_EN_B);
332 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
333}
334
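/* Bring the data path up: reset the rings, request the vector IRQs, enable
 * NAPI and the queue pairs, then start the ae_dev.
 */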
335static int hns3_nic_net_up(struct net_device *netdev)
336{
337 struct hns3_nic_priv *priv = netdev_priv(netdev);
338 struct hnae3_handle *h = priv->ae_handle;
339 int i, j;
340 int ret;
341
342 ret = hns3_nic_reset_all_ring(h);
343 if (ret)
344 return ret;
345
346 /* get irq resource for all vectors */
347 ret = hns3_nic_init_irq(priv);
348 if (ret) {
349 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
350 return ret;
351 }
352
353 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
354
355 /* enable the vectors */
356 for (i = 0; i < priv->vector_num; i++)
357 hns3_vector_enable(&priv->tqp_vector[i]);
358
359 /* enable rcb */
360 for (j = 0; j < h->kinfo.num_tqps; j++)
361 hns3_tqp_enable(h->kinfo.tqp[j]);
362
363 /* start the ae_dev */
364 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
365 if (ret)
366 goto out_start_err;
367
368 return 0;
369
370out_start_err:
371 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
372 while (j--)
373 hns3_tqp_disable(h->kinfo.tqp[j]);
374
375 for (j = i - 1; j >= 0; j--)
376 hns3_vector_disable(&priv->tqp_vector[j]);
377
378 hns3_nic_uninit_irq(priv);
379
380 return ret;
381}
382
383static void hns3_config_xps(struct hns3_nic_priv *priv)
384{
385 int i;
386
387 for (i = 0; i < priv->vector_num; i++) {
388 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
389 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
390
391 while (ring) {
392 int ret;
393
394 ret = netif_set_xps_queue(priv->netdev,
395 &tqp_vector->affinity_mask,
396 ring->tqp->tqp_index);
397 if (ret)
398 netdev_warn(priv->netdev,
399 "set xps queue failed: %d", ret);
400
401 ring = ring->next;
402 }
403 }
404}
405
406static int hns3_nic_net_open(struct net_device *netdev)
407{
408 struct hns3_nic_priv *priv = netdev_priv(netdev);
409 struct hnae3_handle *h = hns3_get_handle(netdev);
410 struct hnae3_knic_private_info *kinfo;
411 int i, ret;
412
413 if (hns3_nic_resetting(netdev))
414 return -EBUSY;
415
416 netif_carrier_off(netdev);
417
418 ret = hns3_nic_set_real_num_queue(netdev);
419 if (ret)
420 return ret;
421
422 ret = hns3_nic_net_up(netdev);
423 if (ret) {
424 netdev_err(netdev,
425 "hns net up fail, ret=%d!\n", ret);
426 return ret;
427 }
428
429 kinfo = &h->kinfo;
430 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
431 netdev_set_prio_tc_map(netdev, i,
432 kinfo->prio_tc[i]);
433 }
434
435 if (h->ae_algo->ops->set_timer_task)
436 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
437
438 hns3_config_xps(priv);
439 return 0;
440}
441
442static void hns3_nic_net_down(struct net_device *netdev)
443{
444 struct hns3_nic_priv *priv = netdev_priv(netdev);
445 struct hnae3_handle *h = hns3_get_handle(netdev);
446 const struct hnae3_ae_ops *ops;
447 int i;
448
449 /* disable vectors */
450 for (i = 0; i < priv->vector_num; i++)
451 hns3_vector_disable(&priv->tqp_vector[i]);
452
453 /* disable rcb */
454 for (i = 0; i < h->kinfo.num_tqps; i++)
455 hns3_tqp_disable(h->kinfo.tqp[i]);
456
457 /* stop ae_dev */
458 ops = priv->ae_handle->ae_algo->ops;
459 if (ops->stop)
460 ops->stop(priv->ae_handle);
461
462 /* free irq resources */
463 hns3_nic_uninit_irq(priv);
464
465 hns3_clear_all_ring(priv->ae_handle);
466}
467
468static int hns3_nic_net_stop(struct net_device *netdev)
469{
470 struct hns3_nic_priv *priv = netdev_priv(netdev);
471 struct hnae3_handle *h = hns3_get_handle(netdev);
472
473 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
474 return 0;
475
476 if (h->ae_algo->ops->set_timer_task)
477 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
478
479 netif_tx_stop_all_queues(netdev);
480 netif_carrier_off(netdev);
481
482 hns3_nic_net_down(netdev);
483
484 return 0;
485}
486
487static int hns3_nic_uc_sync(struct net_device *netdev,
488 const unsigned char *addr)
489{
490 struct hnae3_handle *h = hns3_get_handle(netdev);
491
492 if (h->ae_algo->ops->add_uc_addr)
493 return h->ae_algo->ops->add_uc_addr(h, addr);
494
495 return 0;
496}
497
498static int hns3_nic_uc_unsync(struct net_device *netdev,
499 const unsigned char *addr)
500{
501 struct hnae3_handle *h = hns3_get_handle(netdev);
502
503 if (h->ae_algo->ops->rm_uc_addr)
504 return h->ae_algo->ops->rm_uc_addr(h, addr);
505
506 return 0;
507}
508
509static int hns3_nic_mc_sync(struct net_device *netdev,
510 const unsigned char *addr)
511{
512 struct hnae3_handle *h = hns3_get_handle(netdev);
513
514 if (h->ae_algo->ops->add_mc_addr)
515 return h->ae_algo->ops->add_mc_addr(h, addr);
516
517 return 0;
518}
519
520static int hns3_nic_mc_unsync(struct net_device *netdev,
521 const unsigned char *addr)
522{
523 struct hnae3_handle *h = hns3_get_handle(netdev);
524
525 if (h->ae_algo->ops->rm_mc_addr)
526 return h->ae_algo->ops->rm_mc_addr(h, addr);
527
528 return 0;
529}
530
531static u8 hns3_get_netdev_flags(struct net_device *netdev)
532{
533 u8 flags = 0;
534
535 if (netdev->flags & IFF_PROMISC) {
536 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
537 } else {
538 flags |= HNAE3_VLAN_FLTR;
539 if (netdev->flags & IFF_ALLMULTI)
540 flags |= HNAE3_USER_MPE;
541 }
542
543 return flags;
544}
545
546static void hns3_nic_set_rx_mode(struct net_device *netdev)
547{
548 struct hnae3_handle *h = hns3_get_handle(netdev);
549 u8 new_flags;
550 int ret;
551
552 new_flags = hns3_get_netdev_flags(netdev);
553
554 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
555 if (ret) {
556 netdev_err(netdev, "sync uc address fail\n");
557 if (ret == -ENOSPC)
558 new_flags |= HNAE3_OVERFLOW_UPE;
559 }
560
561 if (netdev->flags & IFF_MULTICAST) {
562 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
563 hns3_nic_mc_unsync);
564 if (ret) {
565 netdev_err(netdev, "sync mc address fail\n");
566 if (ret == -ENOSPC)
567 new_flags |= HNAE3_OVERFLOW_MPE;
568 }
569 }
570
 /* In user-mode promiscuous mode, vlan filtering is disabled to let all
 * packets in. When promiscuous mode is entered because the MAC-VLAN table
 * overflowed, vlan filtering stays enabled.
 */
575 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
576 h->netdev_flags = new_flags;
577 hns3_update_promisc_mode(netdev, new_flags);
578}
579
580int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
581{
582 struct hns3_nic_priv *priv = netdev_priv(netdev);
583 struct hnae3_handle *h = priv->ae_handle;
584
585 if (h->ae_algo->ops->set_promisc_mode) {
586 return h->ae_algo->ops->set_promisc_mode(h,
587 promisc_flags & HNAE3_UPE,
588 promisc_flags & HNAE3_MPE);
589 }
590
591 return 0;
592}
593
594void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
595{
596 struct hns3_nic_priv *priv = netdev_priv(netdev);
597 struct hnae3_handle *h = priv->ae_handle;
598 bool last_state;
599
600 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
601 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
602 if (enable != last_state) {
603 netdev_info(netdev,
604 "%s vlan filter\n",
605 enable ? "enable" : "disable");
606 h->ae_algo->ops->enable_vlan_filter(h, enable);
607 }
608 }
609}
610
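/* Prepare the TX descriptor fields for TSO: clear the checksums that the
 * hardware recomputes, remove the payload length from the TCP pseudo
 * checksum, and fill in paylen, the TSO bit and the MSS.
 */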
611static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
612 u16 *mss, u32 *type_cs_vlan_tso)
613{
614 u32 l4_offset, hdr_len;
615 union l3_hdr_info l3;
616 union l4_hdr_info l4;
617 u32 l4_paylen;
618 int ret;
619
620 if (!skb_is_gso(skb))
621 return 0;
622
623 ret = skb_cow_head(skb, 0);
624 if (unlikely(ret))
625 return ret;
626
627 l3.hdr = skb_network_header(skb);
628 l4.hdr = skb_transport_header(skb);
629
630 /* Software should clear the IPv4's checksum field when tso is
631 * needed.
632 */
633 if (l3.v4->version == 4)
634 l3.v4->check = 0;
635
636 /* tunnel packet.*/
637 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
638 SKB_GSO_GRE_CSUM |
639 SKB_GSO_UDP_TUNNEL |
640 SKB_GSO_UDP_TUNNEL_CSUM)) {
641 if ((!(skb_shinfo(skb)->gso_type &
642 SKB_GSO_PARTIAL)) &&
643 (skb_shinfo(skb)->gso_type &
644 SKB_GSO_UDP_TUNNEL_CSUM)) {
645 /* Software should clear the udp's checksum
646 * field when tso is needed.
647 */
648 l4.udp->check = 0;
649 }
650 /* reset l3&l4 pointers from outer to inner headers */
651 l3.hdr = skb_inner_network_header(skb);
652 l4.hdr = skb_inner_transport_header(skb);
653
654 /* Software should clear the IPv4's checksum field when
655 * tso is needed.
656 */
657 if (l3.v4->version == 4)
658 l3.v4->check = 0;
659 }
660
661 /* normal or tunnel packet*/
662 l4_offset = l4.hdr - skb->data;
663 hdr_len = (l4.tcp->doff << 2) + l4_offset;
664
665 /* remove payload length from inner pseudo checksum when tso*/
666 l4_paylen = skb->len - l4_offset;
667 csum_replace_by_diff(&l4.tcp->check,
668 (__force __wsum)htonl(l4_paylen));
669
670 /* find the txbd field values */
671 *paylen = skb->len - hdr_len;
672 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
673
674 /* get MSS for TSO */
675 *mss = skb_shinfo(skb)->gso_size;
676
677 return 0;
678}
679
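/* Extract the outer and, for encapsulated skbs, the inner L4 protocol
 * numbers, skipping IPv6 extension headers where necessary.
 */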
680static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
681 u8 *il4_proto)
682{
683 union l3_hdr_info l3;
684 unsigned char *l4_hdr;
685 unsigned char *exthdr;
686 u8 l4_proto_tmp;
687 __be16 frag_off;
688
 /* find the outer header pointer */
690 l3.hdr = skb_network_header(skb);
691 l4_hdr = skb_transport_header(skb);
692
693 if (skb->protocol == htons(ETH_P_IPV6)) {
694 exthdr = l3.hdr + sizeof(*l3.v6);
695 l4_proto_tmp = l3.v6->nexthdr;
696 if (l4_hdr != exthdr)
697 ipv6_skip_exthdr(skb, exthdr - skb->data,
698 &l4_proto_tmp, &frag_off);
699 } else if (skb->protocol == htons(ETH_P_IP)) {
700 l4_proto_tmp = l3.v4->protocol;
701 } else {
702 return -EINVAL;
703 }
704
705 *ol4_proto = l4_proto_tmp;
706
707 /* tunnel packet */
708 if (!skb->encapsulation) {
709 *il4_proto = 0;
710 return 0;
711 }
712
 /* find the inner header pointer */
714 l3.hdr = skb_inner_network_header(skb);
715 l4_hdr = skb_inner_transport_header(skb);
716
717 if (l3.v6->version == 6) {
718 exthdr = l3.hdr + sizeof(*l3.v6);
719 l4_proto_tmp = l3.v6->nexthdr;
720 if (l4_hdr != exthdr)
721 ipv6_skip_exthdr(skb, exthdr - skb->data,
722 &l4_proto_tmp, &frag_off);
723 } else if (l3.v4->version == 4) {
724 l4_proto_tmp = l3.v4->protocol;
725 }
726
727 *il4_proto = l4_proto_tmp;
728
729 return 0;
730}
731
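/* Fill the L2/L3/L4 (and, for tunnel packets, OL2/OL3/OL4) header length
 * fields of the TX descriptor. L2 lengths are encoded in units of 2 bytes,
 * L3/L4 lengths in units of 4 bytes, matching the shifts below.
 */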
732static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
733 u8 il4_proto, u32 *type_cs_vlan_tso,
734 u32 *ol_type_vlan_len_msec)
735{
736 union l3_hdr_info l3;
737 union l4_hdr_info l4;
738 unsigned char *l2_hdr;
739 u8 l4_proto = ol4_proto;
740 u32 ol2_len;
741 u32 ol3_len;
742 u32 ol4_len;
743 u32 l2_len;
744 u32 l3_len;
745
746 l3.hdr = skb_network_header(skb);
747 l4.hdr = skb_transport_header(skb);
748
749 /* compute L2 header size for normal packet, defined in 2 Bytes */
750 l2_len = l3.hdr - skb->data;
751 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
752
753 /* tunnel packet*/
754 if (skb->encapsulation) {
755 /* compute OL2 header size, defined in 2 Bytes */
756 ol2_len = l2_len;
757 hns3_set_field(*ol_type_vlan_len_msec,
758 HNS3_TXD_L2LEN_S, ol2_len >> 1);
759
760 /* compute OL3 header size, defined in 4 Bytes */
761 ol3_len = l4.hdr - l3.hdr;
762 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
763 ol3_len >> 2);
764
765 /* MAC in UDP, MAC in GRE (0x6558)*/
766 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
767 /* switch MAC header ptr from outer to inner header.*/
768 l2_hdr = skb_inner_mac_header(skb);
769
770 /* compute OL4 header size, defined in 4 Bytes. */
771 ol4_len = l2_hdr - l4.hdr;
772 hns3_set_field(*ol_type_vlan_len_msec,
773 HNS3_TXD_L4LEN_S, ol4_len >> 2);
774
775 /* switch IP header ptr from outer to inner header */
776 l3.hdr = skb_inner_network_header(skb);
777
778 /* compute inner l2 header size, defined in 2 Bytes. */
779 l2_len = l3.hdr - l2_hdr;
780 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
781 l2_len >> 1);
782 } else {
 /* skb packet types not supported by hardware;
 * the txbd len field is not filled.
 */
786 return;
787 }
788
789 /* switch L4 header pointer from outer to inner */
790 l4.hdr = skb_inner_transport_header(skb);
791
792 l4_proto = il4_proto;
793 }
794
795 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
796 l3_len = l4.hdr - l3.hdr;
797 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
798
799 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
800 switch (l4_proto) {
801 case IPPROTO_TCP:
802 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
803 l4.tcp->doff);
804 break;
805 case IPPROTO_SCTP:
806 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
807 (sizeof(struct sctphdr) >> 2));
808 break;
809 case IPPROTO_UDP:
810 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
811 (sizeof(struct udphdr) >> 2));
812 break;
813 default:
 /* skb packet types not supported by hardware;
 * the txbd len field is not filled.
 */
817 return;
818 }
819}
820
/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
 * packet is a UDP packet whose destination port is the IANA assigned VXLAN
 * port (4789), the hardware is expected to do the checksum offload, but it
 * does not. Fall back to a software checksum for this case.
 */
827static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
828{
829#define IANA_VXLAN_PORT 4789
830 union l4_hdr_info l4;
831
832 l4.hdr = skb_transport_header(skb);
833
834 if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
835 return false;
836
837 skb_checksum_help(skb);
838
839 return true;
840}
841
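/* Fill the L3/L4 (and tunnel OL3/OL4) type and checksum enable fields of the
 * TX descriptor. Unsupported tunnel or L4 types fall back to a software
 * checksum, or are rejected with -EDOM when TSO is requested.
 */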
842static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
843 u8 il4_proto, u32 *type_cs_vlan_tso,
844 u32 *ol_type_vlan_len_msec)
845{
846 union l3_hdr_info l3;
847 u32 l4_proto = ol4_proto;
848
849 l3.hdr = skb_network_header(skb);
850
851 /* define OL3 type and tunnel type(OL4).*/
852 if (skb->encapsulation) {
853 /* define outer network header type.*/
854 if (skb->protocol == htons(ETH_P_IP)) {
855 if (skb_is_gso(skb))
856 hns3_set_field(*ol_type_vlan_len_msec,
857 HNS3_TXD_OL3T_S,
858 HNS3_OL3T_IPV4_CSUM);
859 else
860 hns3_set_field(*ol_type_vlan_len_msec,
861 HNS3_TXD_OL3T_S,
862 HNS3_OL3T_IPV4_NO_CSUM);
863
864 } else if (skb->protocol == htons(ETH_P_IPV6)) {
865 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
866 HNS3_OL3T_IPV6);
867 }
868
869 /* define tunnel type(OL4).*/
870 switch (l4_proto) {
871 case IPPROTO_UDP:
872 hns3_set_field(*ol_type_vlan_len_msec,
873 HNS3_TXD_TUNTYPE_S,
874 HNS3_TUN_MAC_IN_UDP);
875 break;
876 case IPPROTO_GRE:
877 hns3_set_field(*ol_type_vlan_len_msec,
878 HNS3_TXD_TUNTYPE_S,
879 HNS3_TUN_NVGRE);
880 break;
881 default:
 /* drop the skb tunnel packet if hardware doesn't support it,
 * because the hardware can't calculate the checksum when doing TSO.
 */
885 if (skb_is_gso(skb))
886 return -EDOM;
887
 /* the stack computes the IP header already,
 * the driver calculates the l4 checksum when not doing TSO.
 */
891 skb_checksum_help(skb);
892 return 0;
893 }
894
895 l3.hdr = skb_inner_network_header(skb);
896 l4_proto = il4_proto;
897 }
898
899 if (l3.v4->version == 4) {
900 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
901 HNS3_L3T_IPV4);
902
903 /* the stack computes the IP header already, the only time we
904 * need the hardware to recompute it is in the case of TSO.
905 */
906 if (skb_is_gso(skb))
907 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
908 } else if (l3.v6->version == 6) {
909 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
910 HNS3_L3T_IPV6);
911 }
912
913 switch (l4_proto) {
914 case IPPROTO_TCP:
915 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
916 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
917 HNS3_L4T_TCP);
918 break;
919 case IPPROTO_UDP:
920 if (hns3_tunnel_csum_bug(skb))
921 break;
922
923 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
924 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
925 HNS3_L4T_UDP);
926 break;
927 case IPPROTO_SCTP:
928 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
929 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
930 HNS3_L4T_SCTP);
931 break;
932 default:
 /* drop the skb tunnel packet if hardware doesn't support it,
 * because the hardware can't calculate the checksum when doing TSO.
 */
936 if (skb_is_gso(skb))
937 return -EDOM;
938
 /* the stack computes the IP header already,
 * the driver calculates the l4 checksum when not doing TSO.
 */
942 skb_checksum_help(skb);
943 return 0;
944 }
945
946 return 0;
947}
948
949static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
950{
951 /* Config bd buffer end */
952 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
953 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
954}
955
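/* Fill the VLAN related descriptor fields. Per the hardware strategy the
 * outer tag field is used for double tagged packets and the inner tag field
 * for single tagged packets; the skb priority is folded into the PCP bits.
 */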
956static int hns3_fill_desc_vtags(struct sk_buff *skb,
957 struct hns3_enet_ring *tx_ring,
958 u32 *inner_vlan_flag,
959 u32 *out_vlan_flag,
960 u16 *inner_vtag,
961 u16 *out_vtag)
962{
963#define HNS3_TX_VLAN_PRIO_SHIFT 13
964
965 if (skb->protocol == htons(ETH_P_8021Q) &&
966 !(tx_ring->tqp->handle->kinfo.netdev->features &
967 NETIF_F_HW_VLAN_CTAG_TX)) {
 /* When HW VLAN acceleration is turned off, and the stack
 * sets the protocol to 802.1q, the driver just needs to
 * set the protocol to the encapsulated ethertype.
 */
972 skb->protocol = vlan_get_protocol(skb);
973 return 0;
974 }
975
976 if (skb_vlan_tag_present(skb)) {
977 u16 vlan_tag;
978
979 vlan_tag = skb_vlan_tag_get(skb);
980 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
981
982 /* Based on hw strategy, use out_vtag in two layer tag case,
983 * and use inner_vtag in one tag case.
984 */
985 if (skb->protocol == htons(ETH_P_8021Q)) {
986 hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
987 *out_vtag = vlan_tag;
988 } else {
989 hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
990 *inner_vtag = vlan_tag;
991 }
992 } else if (skb->protocol == htons(ETH_P_8021Q)) {
993 struct vlan_ethhdr *vhdr;
994 int rc;
995
996 rc = skb_cow_head(skb, 0);
997 if (unlikely(rc < 0))
998 return rc;
999 vhdr = (struct vlan_ethhdr *)skb->data;
1000 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
1001 << HNS3_TX_VLAN_PRIO_SHIFT);
1002 }
1003
1004 skb->protocol = vlan_get_protocol(skb);
1005 return 0;
1006}
1007
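/* DMA-map one piece of the packet (linear data or a page fragment) and fill
 * one or more TX BDs for it, splitting buffers larger than HNS3_MAX_BD_SIZE
 * across several descriptors.
 */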
1008static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1009 int size, int frag_end, enum hns_desc_type type)
1010{
1011 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1012 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1013 struct device *dev = ring_to_dev(ring);
1014 u16 bdtp_fe_sc_vld_ra_ri = 0;
1015 struct skb_frag_struct *frag;
1016 unsigned int frag_buf_num;
1017 int k, sizeoflast;
1018 dma_addr_t dma;
1019
1020 if (type == DESC_TYPE_SKB) {
1021 struct sk_buff *skb = (struct sk_buff *)priv;
1022 u32 ol_type_vlan_len_msec = 0;
1023 u32 type_cs_vlan_tso = 0;
1024 u32 paylen = skb->len;
1025 u16 inner_vtag = 0;
1026 u16 out_vtag = 0;
1027 u16 mss = 0;
1028 int ret;
1029
1030 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1031 &ol_type_vlan_len_msec,
1032 &inner_vtag, &out_vtag);
1033 if (unlikely(ret))
1034 return ret;
1035
1036 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1037 u8 ol4_proto, il4_proto;
1038
1039 skb_reset_mac_len(skb);
1040
1041 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1042 if (unlikely(ret))
1043 return ret;
1044 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
1045 &type_cs_vlan_tso,
1046 &ol_type_vlan_len_msec);
1047 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
1048 &type_cs_vlan_tso,
1049 &ol_type_vlan_len_msec);
1050 if (unlikely(ret))
1051 return ret;
1052
1053 ret = hns3_set_tso(skb, &paylen, &mss,
1054 &type_cs_vlan_tso);
1055 if (unlikely(ret))
1056 return ret;
1057 }
1058
1059 /* Set txbd */
1060 desc->tx.ol_type_vlan_len_msec =
1061 cpu_to_le32(ol_type_vlan_len_msec);
1062 desc->tx.type_cs_vlan_tso_len =
1063 cpu_to_le32(type_cs_vlan_tso);
1064 desc->tx.paylen = cpu_to_le32(paylen);
1065 desc->tx.mss = cpu_to_le16(mss);
1066 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1067 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1068
1069 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1070 } else {
1071 frag = (struct skb_frag_struct *)priv;
1072 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1073 }
1074
1075 if (unlikely(dma_mapping_error(ring->dev, dma))) {
1076 ring->stats.sw_err_cnt++;
1077 return -ENOMEM;
1078 }
1079
1080 desc_cb->length = size;
1081
1082 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
1083 sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1084 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1085
1086 /* When frag size is bigger than hardware limit, split this frag */
1087 for (k = 0; k < frag_buf_num; k++) {
1088 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1089 desc_cb->priv = priv;
1090 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1091 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1092 DESC_TYPE_SKB : DESC_TYPE_PAGE;
1093
1094 /* now, fill the descriptor */
1095 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1096 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1097 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1098 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1099 frag_end && (k == frag_buf_num - 1) ?
1100 1 : 0);
1101 desc->tx.bdtp_fe_sc_vld_ra_ri =
1102 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1103
1104 /* move ring pointer to next.*/
1105 ring_ptr_move_fw(ring, next_to_use);
1106
1107 desc_cb = &ring->desc_cb[ring->next_to_use];
1108 desc = &ring->desc[ring->next_to_use];
1109 }
1110
1111 return 0;
1112}
1113
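/* Estimate how many TX BDs the skb needs when TSO is enabled. If the total
 * exceeds the per-packet BD limit, the BD count is recomputed from the total
 * length and the skb is copied into a linear skb; -EBUSY is returned when
 * the ring lacks free descriptors.
 */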
1114static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1115 struct hns3_enet_ring *ring)
1116{
1117 struct sk_buff *skb = *out_skb;
1118 struct sk_buff *new_skb = NULL;
1119 struct skb_frag_struct *frag;
1120 int bdnum_for_frag;
1121 int frag_num;
1122 int buf_num;
1123 int size;
1124 int i;
1125
1126 size = skb_headlen(skb);
1127 buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
1128
1129 frag_num = skb_shinfo(skb)->nr_frags;
1130 for (i = 0; i < frag_num; i++) {
1131 frag = &skb_shinfo(skb)->frags[i];
1132 size = skb_frag_size(frag);
1133 bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
1134 HNS3_MAX_BD_SIZE_OFFSET;
1135 if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
1136 return -ENOMEM;
1137
1138 buf_num += bdnum_for_frag;
1139 }
1140
1141 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1142 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
1143 HNS3_MAX_BD_SIZE_OFFSET;
1144 if (ring_space(ring) < buf_num)
1145 return -EBUSY;
 /* manually split the send packet */
1147 new_skb = skb_copy(skb, GFP_ATOMIC);
1148 if (!new_skb)
1149 return -ENOMEM;
1150 dev_kfree_skb_any(skb);
1151 *out_skb = new_skb;
1152 }
1153
1154 if (unlikely(ring_space(ring) < buf_num))
1155 return -EBUSY;
1156
1157 *bnum = buf_num;
1158 return 0;
1159}
1160
1161static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1162 struct hns3_enet_ring *ring)
1163{
1164 struct sk_buff *skb = *out_skb;
1165 struct sk_buff *new_skb = NULL;
1166 int buf_num;
1167
1168 /* No. of segments (plus a header) */
1169 buf_num = skb_shinfo(skb)->nr_frags + 1;
1170
1171 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1172 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1173 if (ring_space(ring) < buf_num)
1174 return -EBUSY;
 /* manually split the send packet */
1176 new_skb = skb_copy(skb, GFP_ATOMIC);
1177 if (!new_skb)
1178 return -ENOMEM;
1179 dev_kfree_skb_any(skb);
1180 *out_skb = new_skb;
1181 }
1182
1183 if (unlikely(ring_space(ring) < buf_num))
1184 return -EBUSY;
1185
1186 *bnum = buf_num;
1187
1188 return 0;
1189}
1190
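/* Unwind partially filled TX descriptors after a mapping failure: unmap each
 * buffer and roll next_to_use back to @next_to_use_orig.
 */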
1191static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1192{
1193 struct device *dev = ring_to_dev(ring);
1194 unsigned int i;
1195
1196 for (i = 0; i < ring->desc_num; i++) {
1197 /* check if this is where we started */
1198 if (ring->next_to_use == next_to_use_orig)
1199 break;
1200
1201 /* unmap the descriptor dma address */
1202 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1203 dma_unmap_single(dev,
1204 ring->desc_cb[ring->next_to_use].dma,
1205 ring->desc_cb[ring->next_to_use].length,
1206 DMA_TO_DEVICE);
1207 else if (ring->desc_cb[ring->next_to_use].length)
1208 dma_unmap_page(dev,
1209 ring->desc_cb[ring->next_to_use].dma,
1210 ring->desc_cb[ring->next_to_use].length,
1211 DMA_TO_DEVICE);
1212
1213 ring->desc_cb[ring->next_to_use].length = 0;
1214
1215 /* rollback one */
1216 ring_ptr_move_bw(ring, next_to_use);
1217 }
1218}
1219
1220netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1221{
1222 struct hns3_nic_priv *priv = netdev_priv(netdev);
1223 struct hns3_nic_ring_data *ring_data =
1224 &tx_ring_data(priv, skb->queue_mapping);
1225 struct hns3_enet_ring *ring = ring_data->ring;
1226 struct netdev_queue *dev_queue;
1227 struct skb_frag_struct *frag;
1228 int next_to_use_head;
1229 int next_to_use_frag;
1230 int buf_num;
1231 int seg_num;
1232 int size;
1233 int ret;
1234 int i;
1235
1236 /* Prefetch the data used later */
1237 prefetch(skb->data);
1238
1239 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1240 case -EBUSY:
1241 u64_stats_update_begin(&ring->syncp);
1242 ring->stats.tx_busy++;
1243 u64_stats_update_end(&ring->syncp);
1244
1245 goto out_net_tx_busy;
1246 case -ENOMEM:
1247 u64_stats_update_begin(&ring->syncp);
1248 ring->stats.sw_err_cnt++;
1249 u64_stats_update_end(&ring->syncp);
1250 netdev_err(netdev, "no memory to xmit!\n");
1251
1252 goto out_err_tx_ok;
1253 default:
1254 break;
1255 }
1256
1257 /* No. of segments (plus a header) */
1258 seg_num = skb_shinfo(skb)->nr_frags + 1;
1259 /* Fill the first part */
1260 size = skb_headlen(skb);
1261
1262 next_to_use_head = ring->next_to_use;
1263
1264 ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1265 DESC_TYPE_SKB);
1266 if (unlikely(ret))
1267 goto head_fill_err;
1268
1269 next_to_use_frag = ring->next_to_use;
1270 /* Fill the fragments */
1271 for (i = 1; i < seg_num; i++) {
1272 frag = &skb_shinfo(skb)->frags[i - 1];
1273 size = skb_frag_size(frag);
1274
1275 ret = hns3_fill_desc(ring, frag, size,
1276 seg_num - 1 == i ? 1 : 0,
1277 DESC_TYPE_PAGE);
1278
1279 if (unlikely(ret))
1280 goto frag_fill_err;
1281 }
1282
 /* All fragments have been translated into BDs; account the bytes to the tx queue */
1284 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1285 netdev_tx_sent_queue(dev_queue, skb->len);
1286
1287 wmb(); /* Commit all data before submit */
1288
1289 hnae3_queue_xmit(ring->tqp, buf_num);
1290
1291 return NETDEV_TX_OK;
1292
1293frag_fill_err:
1294 hns3_clear_desc(ring, next_to_use_frag);
1295
1296head_fill_err:
1297 hns3_clear_desc(ring, next_to_use_head);
1298
1299out_err_tx_ok:
1300 dev_kfree_skb_any(skb);
1301 return NETDEV_TX_OK;
1302
1303out_net_tx_busy:
1304 netif_stop_subqueue(netdev, ring_data->queue_index);
1305 smp_mb(); /* Commit all data before submit */
1306
1307 return NETDEV_TX_BUSY;
1308}
1309
1310static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1311{
1312 struct hnae3_handle *h = hns3_get_handle(netdev);
1313 struct sockaddr *mac_addr = p;
1314 int ret;
1315
1316 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1317 return -EADDRNOTAVAIL;
1318
1319 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1320 netdev_info(netdev, "already using mac address %pM\n",
1321 mac_addr->sa_data);
1322 return 0;
1323 }
1324
1325 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1326 if (ret) {
1327 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1328 return ret;
1329 }
1330
1331 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1332
1333 return 0;
1334}
1335
1336static int hns3_nic_do_ioctl(struct net_device *netdev,
1337 struct ifreq *ifr, int cmd)
1338{
1339 struct hnae3_handle *h = hns3_get_handle(netdev);
1340
1341 if (!netif_running(netdev))
1342 return -EINVAL;
1343
1344 if (!h->ae_algo->ops->do_ioctl)
1345 return -EOPNOTSUPP;
1346
1347 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1348}
1349
1350static int hns3_nic_set_features(struct net_device *netdev,
1351 netdev_features_t features)
1352{
1353 netdev_features_t changed = netdev->features ^ features;
1354 struct hns3_nic_priv *priv = netdev_priv(netdev);
1355 struct hnae3_handle *h = priv->ae_handle;
1356 bool enable;
1357 int ret;
1358
1359 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1360 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1361 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1362 else
1363 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1364 }
1365
1366 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1367 enable = !!(features & NETIF_F_GRO_HW);
1368 ret = h->ae_algo->ops->set_gro_en(h, enable);
1369 if (ret)
1370 return ret;
1371 }
1372
1373 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1374 h->ae_algo->ops->enable_vlan_filter) {
1375 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1376 h->ae_algo->ops->enable_vlan_filter(h, enable);
1377 }
1378
1379 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1380 h->ae_algo->ops->enable_hw_strip_rxvtag) {
1381 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1382 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1383 if (ret)
1384 return ret;
1385 }
1386
1387 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1388 enable = !!(features & NETIF_F_NTUPLE);
1389 h->ae_algo->ops->enable_fd(h, enable);
1390 }
1391
1392 netdev->features = features;
1393 return 0;
1394}
1395
1396static void hns3_nic_get_stats64(struct net_device *netdev,
1397 struct rtnl_link_stats64 *stats)
1398{
1399 struct hns3_nic_priv *priv = netdev_priv(netdev);
1400 int queue_num = priv->ae_handle->kinfo.num_tqps;
1401 struct hnae3_handle *handle = priv->ae_handle;
1402 struct hns3_enet_ring *ring;
1403 u64 rx_length_errors = 0;
1404 u64 rx_crc_errors = 0;
1405 u64 rx_multicast = 0;
1406 unsigned int start;
1407 u64 tx_errors = 0;
1408 u64 rx_errors = 0;
1409 unsigned int idx;
1410 u64 tx_bytes = 0;
1411 u64 rx_bytes = 0;
1412 u64 tx_pkts = 0;
1413 u64 rx_pkts = 0;
1414 u64 tx_drop = 0;
1415 u64 rx_drop = 0;
1416
1417 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1418 return;
1419
1420 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1421
1422 for (idx = 0; idx < queue_num; idx++) {
1423 /* fetch the tx stats */
1424 ring = priv->ring_data[idx].ring;
1425 do {
1426 start = u64_stats_fetch_begin_irq(&ring->syncp);
1427 tx_bytes += ring->stats.tx_bytes;
1428 tx_pkts += ring->stats.tx_pkts;
1429 tx_drop += ring->stats.sw_err_cnt;
1430 tx_errors += ring->stats.sw_err_cnt;
1431 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1432
1433 /* fetch the rx stats */
1434 ring = priv->ring_data[idx + queue_num].ring;
1435 do {
1436 start = u64_stats_fetch_begin_irq(&ring->syncp);
1437 rx_bytes += ring->stats.rx_bytes;
1438 rx_pkts += ring->stats.rx_pkts;
1439 rx_drop += ring->stats.non_vld_descs;
1440 rx_drop += ring->stats.l2_err;
1441 rx_errors += ring->stats.non_vld_descs;
1442 rx_errors += ring->stats.l2_err;
1443 rx_crc_errors += ring->stats.l2_err;
1444 rx_crc_errors += ring->stats.l3l4_csum_err;
1445 rx_multicast += ring->stats.rx_multicast;
1446 rx_length_errors += ring->stats.err_pkt_len;
1447 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1448 }
1449
1450 stats->tx_bytes = tx_bytes;
1451 stats->tx_packets = tx_pkts;
1452 stats->rx_bytes = rx_bytes;
1453 stats->rx_packets = rx_pkts;
1454
1455 stats->rx_errors = rx_errors;
1456 stats->multicast = rx_multicast;
1457 stats->rx_length_errors = rx_length_errors;
1458 stats->rx_crc_errors = rx_crc_errors;
1459 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1460
1461 stats->tx_errors = tx_errors;
1462 stats->rx_dropped = rx_drop;
1463 stats->tx_dropped = tx_drop;
1464 stats->collisions = netdev->stats.collisions;
1465 stats->rx_over_errors = netdev->stats.rx_over_errors;
1466 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1467 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1468 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1469 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1470 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1471 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1472 stats->tx_window_errors = netdev->stats.tx_window_errors;
1473 stats->rx_compressed = netdev->stats.rx_compressed;
1474 stats->tx_compressed = netdev->stats.tx_compressed;
1475}
1476
1477static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1478{
1479 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1480 struct hnae3_handle *h = hns3_get_handle(netdev);
1481 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1482 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1483 u8 tc = mqprio_qopt->qopt.num_tc;
1484 u16 mode = mqprio_qopt->mode;
1485 u8 hw = mqprio_qopt->qopt.hw;
1486
1487 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1488 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1489 return -EOPNOTSUPP;
1490
1491 if (tc > HNAE3_MAX_TC)
1492 return -EINVAL;
1493
1494 if (!netdev)
1495 return -EINVAL;
1496
1497 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1498 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1499}
1500
1501static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1502 void *type_data)
1503{
1504 if (type != TC_SETUP_QDISC_MQPRIO)
1505 return -EOPNOTSUPP;
1506
1507 return hns3_setup_tc(dev, type_data);
1508}
1509
1510static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1511 __be16 proto, u16 vid)
1512{
1513 struct hnae3_handle *h = hns3_get_handle(netdev);
1514 struct hns3_nic_priv *priv = netdev_priv(netdev);
1515 int ret = -EIO;
1516
1517 if (h->ae_algo->ops->set_vlan_filter)
1518 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1519
1520 if (!ret)
1521 set_bit(vid, priv->active_vlans);
1522
1523 return ret;
1524}
1525
1526static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1527 __be16 proto, u16 vid)
1528{
1529 struct hnae3_handle *h = hns3_get_handle(netdev);
1530 struct hns3_nic_priv *priv = netdev_priv(netdev);
1531 int ret = -EIO;
1532
1533 if (h->ae_algo->ops->set_vlan_filter)
1534 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1535
1536 if (!ret)
1537 clear_bit(vid, priv->active_vlans);
1538
1539 return ret;
1540}
1541
1542static int hns3_restore_vlan(struct net_device *netdev)
1543{
1544 struct hns3_nic_priv *priv = netdev_priv(netdev);
1545 int ret = 0;
1546 u16 vid;
1547
1548 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1549 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1550 if (ret) {
1551 netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
1552 vid, ret);
1553 return ret;
1554 }
1555 }
1556
1557 return ret;
1558}
1559
1560static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1561 u8 qos, __be16 vlan_proto)
1562{
1563 struct hnae3_handle *h = hns3_get_handle(netdev);
1564 int ret = -EIO;
1565
1566 if (h->ae_algo->ops->set_vf_vlan_filter)
1567 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1568 qos, vlan_proto);
1569
1570 return ret;
1571}
1572
1573static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1574{
1575 struct hnae3_handle *h = hns3_get_handle(netdev);
1576 int ret;
1577
1578 if (!h->ae_algo->ops->set_mtu)
1579 return -EOPNOTSUPP;
1580
1581 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1582 if (ret)
1583 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1584 ret);
1585 else
1586 netdev->mtu = new_mtu;
1587
1588 return ret;
1589}
1590
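/* Locate the TX queue that triggered the watchdog and dump its software and
 * hardware ring pointers for debugging. Returns false when no stopped,
 * timed out queue is found.
 */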
1591static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1592{
1593 struct hns3_nic_priv *priv = netdev_priv(ndev);
1594 struct hns3_enet_ring *tx_ring = NULL;
1595 int timeout_queue = 0;
1596 int hw_head, hw_tail;
1597 int i;
1598
1599 /* Find the stopped queue the same way the stack does */
1600 for (i = 0; i < ndev->real_num_tx_queues; i++) {
1601 struct netdev_queue *q;
1602 unsigned long trans_start;
1603
1604 q = netdev_get_tx_queue(ndev, i);
1605 trans_start = q->trans_start;
1606 if (netif_xmit_stopped(q) &&
1607 time_after(jiffies,
1608 (trans_start + ndev->watchdog_timeo))) {
1609 timeout_queue = i;
1610 break;
1611 }
1612 }
1613
 if (i == ndev->real_num_tx_queues) {
1615 netdev_info(ndev,
1616 "no netdev TX timeout queue found, timeout count: %llu\n",
1617 priv->tx_timeout_count);
1618 return false;
1619 }
1620
1621 tx_ring = priv->ring_data[timeout_queue].ring;
1622
1623 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1624 HNS3_RING_TX_RING_HEAD_REG);
1625 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1626 HNS3_RING_TX_RING_TAIL_REG);
1627 netdev_info(ndev,
1628 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1629 priv->tx_timeout_count,
1630 timeout_queue,
1631 tx_ring->next_to_use,
1632 tx_ring->next_to_clean,
1633 hw_head,
1634 hw_tail,
1635 readl(tx_ring->tqp_vector->mask_addr));
1636
1637 return true;
1638}
1639
1640static void hns3_nic_net_timeout(struct net_device *ndev)
1641{
1642 struct hns3_nic_priv *priv = netdev_priv(ndev);
1643 struct hnae3_handle *h = priv->ae_handle;
1644
1645 if (!hns3_get_tx_timeo_queue_info(ndev))
1646 return;
1647
1648 priv->tx_timeout_count++;
1649
 /* request the reset, and let the hclge determine
 * which reset level should be done
 */
1653 if (h->ae_algo->ops->reset_event)
1654 h->ae_algo->ops->reset_event(h->pdev, h);
1655}
1656
1657static const struct net_device_ops hns3_nic_netdev_ops = {
1658 .ndo_open = hns3_nic_net_open,
1659 .ndo_stop = hns3_nic_net_stop,
1660 .ndo_start_xmit = hns3_nic_net_xmit,
1661 .ndo_tx_timeout = hns3_nic_net_timeout,
1662 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1663 .ndo_do_ioctl = hns3_nic_do_ioctl,
1664 .ndo_change_mtu = hns3_nic_change_mtu,
1665 .ndo_set_features = hns3_nic_set_features,
1666 .ndo_get_stats64 = hns3_nic_get_stats64,
1667 .ndo_setup_tc = hns3_nic_setup_tc,
1668 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1669 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1670 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1671 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1672};
1673
1674static bool hns3_is_phys_func(struct pci_dev *pdev)
1675{
1676 u32 dev_id = pdev->device;
1677
1678 switch (dev_id) {
1679 case HNAE3_DEV_ID_GE:
1680 case HNAE3_DEV_ID_25GE:
1681 case HNAE3_DEV_ID_25GE_RDMA:
1682 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1683 case HNAE3_DEV_ID_50GE_RDMA:
1684 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1685 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1686 return true;
1687 case HNAE3_DEV_ID_100G_VF:
1688 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1689 return false;
1690 default:
 dev_warn(&pdev->dev, "unrecognized pci device-id %d\n",
 dev_id);
1693 }
1694
1695 return false;
1696}
1697
1698static void hns3_disable_sriov(struct pci_dev *pdev)
1699{
1700 /* If our VFs are assigned we cannot shut down SR-IOV
1701 * without causing issues, so just leave the hardware
1702 * available but disabled
1703 */
1704 if (pci_vfs_assigned(pdev)) {
1705 dev_warn(&pdev->dev,
1706 "disabling driver while VFs are assigned\n");
1707 return;
1708 }
1709
1710 pci_disable_sriov(pdev);
1711}
1712
1713static void hns3_get_dev_capability(struct pci_dev *pdev,
1714 struct hnae3_ae_dev *ae_dev)
1715{
1716 if (pdev->revision >= 0x21) {
1717 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1718 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1719 }
1720}
1721
1722/* hns3_probe - Device initialization routine
1723 * @pdev: PCI device information struct
1724 * @ent: entry in hns3_pci_tbl
1725 *
1726 * hns3_probe initializes a PF identified by a pci_dev structure.
1727 * The OS initialization, configuring of the PF private structure,
1728 * and a hardware reset occur.
1729 *
1730 * Returns 0 on success, negative on failure
1731 */
1732static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1733{
1734 struct hnae3_ae_dev *ae_dev;
1735 int ret;
1736
1737 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1738 GFP_KERNEL);
1739 if (!ae_dev) {
1740 ret = -ENOMEM;
1741 return ret;
1742 }
1743
1744 ae_dev->pdev = pdev;
1745 ae_dev->flag = ent->driver_data;
1746 ae_dev->dev_type = HNAE3_DEV_KNIC;
1747 ae_dev->reset_type = HNAE3_NONE_RESET;
1748 hns3_get_dev_capability(pdev, ae_dev);
1749 pci_set_drvdata(pdev, ae_dev);
1750
1751 ret = hnae3_register_ae_dev(ae_dev);
1752 if (ret) {
1753 devm_kfree(&pdev->dev, ae_dev);
1754 pci_set_drvdata(pdev, NULL);
1755 }
1756
1757 return ret;
1758}
1759
1760/* hns3_remove - Device removal routine
1761 * @pdev: PCI device information struct
1762 */
1763static void hns3_remove(struct pci_dev *pdev)
1764{
1765 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1766
1767 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1768 hns3_disable_sriov(pdev);
1769
1770 hnae3_unregister_ae_dev(ae_dev);
1771 pci_set_drvdata(pdev, NULL);
1772}
1773
1774/**
1775 * hns3_pci_sriov_configure
1776 * @pdev: pointer to a pci_dev structure
1777 * @num_vfs: number of VFs to allocate
1778 *
1779 * Enable or change the number of VFs. Called when the user updates the number
1780 * of VFs in sysfs.
1781 **/
1782static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1783{
1784 int ret;
1785
1786 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1787 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1788 return -EINVAL;
1789 }
1790
1791 if (num_vfs) {
1792 ret = pci_enable_sriov(pdev, num_vfs);
1793 if (ret)
1794 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1795 else
1796 return num_vfs;
1797 } else if (!pci_vfs_assigned(pdev)) {
1798 pci_disable_sriov(pdev);
1799 } else {
1800 dev_warn(&pdev->dev,
1801 "Unable to free VFs because some are assigned to VMs.\n");
1802 }
1803
1804 return 0;
1805}
1806
1807static void hns3_shutdown(struct pci_dev *pdev)
1808{
1809 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1810
1811 hnae3_unregister_ae_dev(ae_dev);
1812 devm_kfree(&pdev->dev, ae_dev);
1813 pci_set_drvdata(pdev, NULL);
1814
1815 if (system_state == SYSTEM_POWER_OFF)
1816 pci_set_power_state(pdev, PCI_D3hot);
1817}
1818
1819static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1820 pci_channel_state_t state)
1821{
1822 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1823 pci_ers_result_t ret;
1824
1825 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
1826
1827 if (state == pci_channel_io_perm_failure)
1828 return PCI_ERS_RESULT_DISCONNECT;
1829
1830 if (!ae_dev) {
1831 dev_err(&pdev->dev,
1832 "Can't recover - error happened during device init\n");
1833 return PCI_ERS_RESULT_NONE;
1834 }
1835
1836 if (ae_dev->ops->handle_hw_ras_error)
1837 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1838 else
1839 return PCI_ERS_RESULT_NONE;
1840
1841 return ret;
1842}
1843
1844static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1845{
1846 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1847 struct device *dev = &pdev->dev;
1848
1849 dev_info(dev, "requesting reset due to PCI error\n");
1850
1851 /* request the reset */
1852 if (ae_dev->ops->reset_event) {
1853 if (!ae_dev->override_pci_need_reset)
1854 ae_dev->ops->reset_event(pdev, NULL);
1855
1856 return PCI_ERS_RESULT_RECOVERED;
1857 }
1858
1859 return PCI_ERS_RESULT_DISCONNECT;
1860}
1861
1862static void hns3_reset_prepare(struct pci_dev *pdev)
1863{
1864 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1865
1866 dev_info(&pdev->dev, "hns3 flr prepare\n");
1867 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1868 ae_dev->ops->flr_prepare(ae_dev);
1869}
1870
1871static void hns3_reset_done(struct pci_dev *pdev)
1872{
1873 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1874
1875 dev_info(&pdev->dev, "hns3 flr done\n");
1876 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1877 ae_dev->ops->flr_done(ae_dev);
1878}
1879
1880static const struct pci_error_handlers hns3_err_handler = {
1881 .error_detected = hns3_error_detected,
1882 .slot_reset = hns3_slot_reset,
1883 .reset_prepare = hns3_reset_prepare,
1884 .reset_done = hns3_reset_done,
1885};
1886
1887static struct pci_driver hns3_driver = {
1888 .name = hns3_driver_name,
1889 .id_table = hns3_pci_tbl,
1890 .probe = hns3_probe,
1891 .remove = hns3_remove,
1892 .shutdown = hns3_shutdown,
1893 .sriov_configure = hns3_pci_sriov_configure,
1894 .err_handler = &hns3_err_handler,
1895};
1896
1897/* set default feature to hns3 */
1898static void hns3_set_default_feature(struct net_device *netdev)
1899{
1900 struct hnae3_handle *h = hns3_get_handle(netdev);
1901 struct pci_dev *pdev = h->pdev;
1902
1903 netdev->priv_flags |= IFF_UNICAST_FLT;
1904
1905 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1906 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1907 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1908 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1909 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1910
1911 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1912
1913 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1914
1915 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1916 NETIF_F_HW_VLAN_CTAG_FILTER |
1917 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1918 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1919 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1920 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1921 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1922
1923 netdev->vlan_features |=
1924 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1925 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1926 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1927 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1928 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1929
1930 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1931 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1932 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1933 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1934 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1935 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1936
1937 if (pdev->revision >= 0x21) {
1938 netdev->hw_features |= NETIF_F_GRO_HW;
1939 netdev->features |= NETIF_F_GRO_HW;
1940
1941 if (!(h->flags & HNAE3_SUPPORT_VF)) {
1942 netdev->hw_features |= NETIF_F_NTUPLE;
1943 netdev->features |= NETIF_F_NTUPLE;
1944 }
1945 }
1946}
1947
1948static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1949 struct hns3_desc_cb *cb)
1950{
1951 unsigned int order = hnae3_page_order(ring);
1952 struct page *p;
1953
1954 p = dev_alloc_pages(order);
1955 if (!p)
1956 return -ENOMEM;
1957
1958 cb->priv = p;
1959 cb->page_offset = 0;
1960 cb->reuse_flag = 0;
1961 cb->buf = page_address(p);
1962 cb->length = hnae3_page_size(ring);
1963 cb->type = DESC_TYPE_PAGE;
1964
1965 return 0;
1966}
1967
1968static void hns3_free_buffer(struct hns3_enet_ring *ring,
1969 struct hns3_desc_cb *cb)
1970{
1971 if (cb->type == DESC_TYPE_SKB)
1972 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1973 else if (!HNAE3_IS_TX_RING(ring))
1974 put_page((struct page *)cb->priv);
1975 memset(cb, 0, sizeof(*cb));
1976}
1977
1978static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1979{
1980 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1981 cb->length, ring_to_dma_dir(ring));
1982
1983 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1984 return -EIO;
1985
1986 return 0;
1987}
1988
1989static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1990 struct hns3_desc_cb *cb)
1991{
1992 if (cb->type == DESC_TYPE_SKB)
1993 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1994 ring_to_dma_dir(ring));
1995 else if (cb->length)
1996 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1997 ring_to_dma_dir(ring));
1998}
1999
2000static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2001{
2002 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2003 ring->desc[i].addr = 0;
2004}
2005
2006static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2007{
2008 struct hns3_desc_cb *cb = &ring->desc_cb[i];
2009
2010 if (!ring->desc_cb[i].dma)
2011 return;
2012
2013 hns3_buffer_detach(ring, i);
2014 hns3_free_buffer(ring, cb);
2015}
2016
2017static void hns3_free_buffers(struct hns3_enet_ring *ring)
2018{
2019 int i;
2020
2021 for (i = 0; i < ring->desc_num; i++)
2022 hns3_free_buffer_detach(ring, i);
2023}
2024
2025/* free desc along with its attached buffer */
2026static void hns3_free_desc(struct hns3_enet_ring *ring)
2027{
2028 int size = ring->desc_num * sizeof(ring->desc[0]);
2029
2030 hns3_free_buffers(ring);
2031
2032 if (ring->desc) {
2033 dma_free_coherent(ring_to_dev(ring), size,
2034 ring->desc, ring->desc_dma_addr);
2035 ring->desc = NULL;
2036 }
2037}
2038
2039static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2040{
2041 int size = ring->desc_num * sizeof(ring->desc[0]);
2042
2043 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2044 &ring->desc_dma_addr, GFP_KERNEL);
2045 if (!ring->desc)
2046 return -ENOMEM;
2047
2048 return 0;
2049}
2050
2051static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2052 struct hns3_desc_cb *cb)
2053{
2054 int ret;
2055
2056 ret = hns3_alloc_buffer(ring, cb);
2057 if (ret)
2058 goto out;
2059
2060 ret = hns3_map_buffer(ring, cb);
2061 if (ret)
2062 goto out_with_buf;
2063
2064 return 0;
2065
2066out_with_buf:
2067 hns3_free_buffer(ring, cb);
2068out:
2069 return ret;
2070}
2071
2072static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2073{
2074 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2075
2076 if (ret)
2077 return ret;
2078
2079 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2080
2081 return 0;
2082}
2083
2084/* Allocate memory for the raw packet buffer and map it for DMA */
2085static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2086{
2087 int i, j, ret;
2088
2089 for (i = 0; i < ring->desc_num; i++) {
2090 ret = hns3_alloc_buffer_attach(ring, i);
2091 if (ret)
2092 goto out_buffer_fail;
2093 }
2094
2095 return 0;
2096
2097out_buffer_fail:
2098 for (j = i - 1; j >= 0; j--)
2099 hns3_free_buffer_detach(ring, j);
2100 return ret;
2101}
2102
2103/* detach an in-use buffer and replace it with a reserved one */
2104static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2105 struct hns3_desc_cb *res_cb)
2106{
2107 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2108 ring->desc_cb[i] = *res_cb;
2109 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2110 ring->desc[i].rx.bd_base_info = 0;
2111}
2112
2113static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2114{
2115 ring->desc_cb[i].reuse_flag = 0;
2116 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2117 + ring->desc_cb[i].page_offset);
2118 ring->desc[i].rx.bd_base_info = 0;
2119}
2120
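/* Release the buffer at next_to_clean, account its bytes and packets, and
 * advance next_to_clean.
 */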
2121static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
2122 int *pkts)
2123{
2124 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2125
2126 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2127 (*bytes) += desc_cb->length;
2128	/* desc_cb will be cleared by hns3_free_buffer_detach() */
2129 hns3_free_buffer_detach(ring, ring->next_to_clean);
2130
2131 ring_ptr_move_fw(ring, next_to_clean);
2132}
2133
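/* The head value reported by hardware is only valid if it lies in the
 * half-open interval (next_to_clean, next_to_use], taking wrap-around of
 * the ring into account.
 */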
2134static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2135{
2136 int u = ring->next_to_use;
2137 int c = ring->next_to_clean;
2138
2139 if (unlikely(h > ring->desc_num))
2140 return 0;
2141
2142 return u > c ? (h > c && h <= u) : (h > c || h <= u);
2143}
2144
2145void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2146{
2147 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2148 struct hns3_nic_priv *priv = netdev_priv(netdev);
2149 struct netdev_queue *dev_queue;
2150 int bytes, pkts;
2151 int head;
2152
2153 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2154	rmb(); /* Make sure head is ready before touching any data */
2155
2156 if (is_ring_empty(ring) || head == ring->next_to_clean)
2157 return; /* no data to poll */
2158
2159 if (unlikely(!is_valid_clean_head(ring, head))) {
2160 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2161 ring->next_to_use, ring->next_to_clean);
2162
2163 u64_stats_update_begin(&ring->syncp);
2164 ring->stats.io_err_cnt++;
2165 u64_stats_update_end(&ring->syncp);
2166 return;
2167 }
2168
2169 bytes = 0;
2170 pkts = 0;
2171 while (head != ring->next_to_clean) {
2172 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2173 /* Issue prefetch for next Tx descriptor */
2174 prefetch(&ring->desc_cb[ring->next_to_clean]);
2175 }
2176
2177 ring->tqp_vector->tx_group.total_bytes += bytes;
2178 ring->tqp_vector->tx_group.total_packets += pkts;
2179
2180 u64_stats_update_begin(&ring->syncp);
2181 ring->stats.tx_bytes += bytes;
2182 ring->stats.tx_pkts += pkts;
2183 u64_stats_update_end(&ring->syncp);
2184
2185 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2186 netdev_tx_completed_queue(dev_queue, pkts, bytes);
2187
2188 if (unlikely(pkts && netif_carrier_ok(netdev) &&
2189 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2190 /* Make sure that anybody stopping the queue after this
2191 * sees the new next_to_clean.
2192 */
2193 smp_mb();
2194 if (netif_tx_queue_stopped(dev_queue) &&
2195 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2196 netif_tx_wake_queue(dev_queue);
2197 ring->stats.restart_queue++;
2198 }
2199 }
2200}
2201
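/* Return how many descriptors are free for refilling, i.e. the distance
 * from next_to_use forward around the ring to next_to_clean.
 */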
2202static int hns3_desc_unused(struct hns3_enet_ring *ring)
2203{
2204 int ntc = ring->next_to_clean;
2205 int ntu = ring->next_to_use;
2206
2207 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2208}
2209
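/* Refill 'cleaned_count' RX descriptors, reusing pages where possible and
 * mapping fresh ones otherwise, then notify hardware via the RX head
 * register.
 */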
2210static void
2211hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2212{
2213 struct hns3_desc_cb *desc_cb;
2214 struct hns3_desc_cb res_cbs;
2215 int i, ret;
2216
2217	for (i = 0; i < cleaned_count; i++) {
2218 desc_cb = &ring->desc_cb[ring->next_to_use];
2219 if (desc_cb->reuse_flag) {
2220 u64_stats_update_begin(&ring->syncp);
2221 ring->stats.reuse_pg_cnt++;
2222 u64_stats_update_end(&ring->syncp);
2223
2224 hns3_reuse_buffer(ring, ring->next_to_use);
2225 } else {
2226 ret = hns3_reserve_buffer_map(ring, &res_cbs);
2227 if (ret) {
2228 u64_stats_update_begin(&ring->syncp);
2229 ring->stats.sw_err_cnt++;
2230 u64_stats_update_end(&ring->syncp);
2231
2232 netdev_err(ring->tqp->handle->kinfo.netdev,
2233 "hnae reserve buffer map failed.\n");
2234 break;
2235 }
2236 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2237 }
2238
2239 ring_ptr_move_fw(ring, next_to_use);
2240 }
2241
2242	wmb(); /* Make sure all data has been written before ringing the doorbell */
2243 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2244}
2245
2246static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2247 struct hns3_enet_ring *ring, int pull_len,
2248 struct hns3_desc_cb *desc_cb)
2249{
2250 struct hns3_desc *desc;
2251 u32 truesize;
2252 int size;
2253 int last_offset;
2254 bool twobufs;
2255
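	/* When a page only holds two 2K buffers (i.e. page size below 8K),
	 * reuse works by flipping between the two halves of the page.
	 */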
2256 twobufs = ((PAGE_SIZE < 8192) &&
2257 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2258
2259 desc = &ring->desc[ring->next_to_clean];
2260 size = le16_to_cpu(desc->rx.size);
2261
2262 truesize = hnae3_buf_size(ring);
2263
2264 if (!twobufs)
2265 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2266
2267 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2268 size - pull_len, truesize);
2269
2270	/* Avoid re-using pages allocated on a remote NUMA node; the flag defaults to no reuse */
2271 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2272 return;
2273
2274 if (twobufs) {
2275		/* If we are the only owner of the page we can reuse it */
2276 if (likely(page_count(desc_cb->priv) == 1)) {
2277 /* Flip page offset to other buffer */
2278 desc_cb->page_offset ^= truesize;
2279
2280 desc_cb->reuse_flag = 1;
2281			/* bump the ref count on the page before it is given */
2282 get_page(desc_cb->priv);
2283 }
2284 return;
2285 }
2286
2287	/* Move the offset up to the next buffer */
2288 desc_cb->page_offset += truesize;
2289
2290 if (desc_cb->page_offset <= last_offset) {
2291 desc_cb->reuse_flag = 1;
2292		/* Bump the ref count on the page before it is given */
2293 get_page(desc_cb->priv);
2294 }
2295}
2296
2297static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2298 struct hns3_desc *desc)
2299{
2300 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2301 int l3_type, l4_type;
2302 u32 bd_base_info;
2303 int ol4_type;
2304 u32 l234info;
2305
2306 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2307 l234info = le32_to_cpu(desc->rx.l234_info);
2308
2309 skb->ip_summed = CHECKSUM_NONE;
2310
2311 skb_checksum_none_assert(skb);
2312
2313 if (!(netdev->features & NETIF_F_RXCSUM))
2314 return;
2315
2316 /* We MUST enable hardware checksum before enabling hardware GRO */
2317 if (skb_shinfo(skb)->gso_size) {
2318 skb->ip_summed = CHECKSUM_UNNECESSARY;
2319 return;
2320 }
2321
2322 /* check if hardware has done checksum */
2323 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2324 return;
2325
2326 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2327 BIT(HNS3_RXD_OL3E_B) |
2328 BIT(HNS3_RXD_OL4E_B)))) {
2329 u64_stats_update_begin(&ring->syncp);
2330 ring->stats.l3l4_csum_err++;
2331 u64_stats_update_end(&ring->syncp);
2332
2333 return;
2334 }
2335
2336 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2337 HNS3_RXD_OL4ID_S);
2338 switch (ol4_type) {
2339 case HNS3_OL4_TYPE_MAC_IN_UDP:
2340 case HNS3_OL4_TYPE_NVGRE:
2341 skb->csum_level = 1;
2342 /* fall through */
2343 case HNS3_OL4_TYPE_NO_TUN:
2344 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2345 HNS3_RXD_L3ID_S);
2346 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2347 HNS3_RXD_L4ID_S);
2348
2349 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2350 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2351 l3_type == HNS3_L3_TYPE_IPV6) &&
2352 (l4_type == HNS3_L4_TYPE_UDP ||
2353 l4_type == HNS3_L4_TYPE_TCP ||
2354 l4_type == HNS3_L4_TYPE_SCTP))
2355 skb->ip_summed = CHECKSUM_UNNECESSARY;
2356 break;
2357 default:
2358 break;
2359 }
2360}
2361
2362static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2363{
2364 if (skb_has_frag_list(skb))
2365 napi_gro_flush(&ring->tqp_vector->napi, false);
2366
2367 napi_gro_receive(&ring->tqp_vector->napi, skb);
2368}
2369
2370static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2371 struct hns3_desc *desc, u32 l234info,
2372 u16 *vlan_tag)
2373{
2374 struct pci_dev *pdev = ring->tqp->handle->pdev;
2375
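	/* Descriptors from HW revision 0x20 do not report which tag was
	 * stripped, so check the outer tag first and fall back to the
	 * inner one.
	 */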
2376 if (pdev->revision == 0x20) {
2377 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2378 if (!(*vlan_tag & VLAN_VID_MASK))
2379 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2380
2381 return (*vlan_tag != 0);
2382 }
2383
2384#define HNS3_STRP_OUTER_VLAN 0x1
2385#define HNS3_STRP_INNER_VLAN 0x2
2386
2387 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2388 HNS3_RXD_STRP_TAGP_S)) {
2389 case HNS3_STRP_OUTER_VLAN:
2390 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2391 return true;
2392 case HNS3_STRP_INNER_VLAN:
2393 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2394 return true;
2395 default:
2396 return false;
2397 }
2398}
2399
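/* Build the initial skb for a received packet. Returns 0 when the whole
 * packet fits in the linear area, HNS3_NEED_ADD_FRAG when further
 * descriptors must be added as fragments, or a negative errno on failure.
 */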
2400static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2401 unsigned char *va)
2402{
2403#define HNS3_NEED_ADD_FRAG 1
2404 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2405 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2406 struct sk_buff *skb;
2407
2408 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2409 skb = ring->skb;
2410 if (unlikely(!skb)) {
2411 netdev_err(netdev, "alloc rx skb fail\n");
2412
2413 u64_stats_update_begin(&ring->syncp);
2414 ring->stats.sw_err_cnt++;
2415 u64_stats_update_end(&ring->syncp);
2416
2417 return -ENOMEM;
2418 }
2419
2420 prefetchw(skb->data);
2421
2422 ring->pending_buf = 1;
2423 ring->frag_num = 0;
2424 ring->tail_skb = NULL;
2425 if (length <= HNS3_RX_HEAD_SIZE) {
2426 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2427
2428 /* We can reuse buffer as-is, just make sure it is local */
2429 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2430 desc_cb->reuse_flag = 1;
2431 else /* This page cannot be reused so discard it */
2432 put_page(desc_cb->priv);
2433
2434 ring_ptr_move_fw(ring, next_to_clean);
2435 return 0;
2436 }
2437 u64_stats_update_begin(&ring->syncp);
2438 ring->stats.seg_pkt_cnt++;
2439 u64_stats_update_end(&ring->syncp);
2440
2441 ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2442 __skb_put(skb, ring->pull_len);
2443 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2444 desc_cb);
2445 ring_ptr_move_fw(ring, next_to_clean);
2446
2447 return HNS3_NEED_ADD_FRAG;
2448}
2449
2450static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2451 struct sk_buff **out_skb, bool pending)
2452{
2453 struct sk_buff *skb = *out_skb;
2454 struct sk_buff *head_skb = *out_skb;
2455 struct sk_buff *new_skb;
2456 struct hns3_desc_cb *desc_cb;
2457 struct hns3_desc *pre_desc;
2458 u32 bd_base_info;
2459 int pre_bd;
2460
2461	/* If there is a pending BD, next_to_clean has already moved past it,
2462	 * so read the FE bit from the previous descriptor in that case.
2463	 */
2464 if (pending) {
2465 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2466 ring->desc_num;
2467 pre_desc = &ring->desc[pre_bd];
2468 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2469 } else {
2470 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2471 }
2472
2473 while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2474 desc = &ring->desc[ring->next_to_clean];
2475 desc_cb = &ring->desc_cb[ring->next_to_clean];
2476 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2477		/* make sure the HW has finished writing the descriptor */
2478 dma_rmb();
2479 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2480 return -ENXIO;
2481
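		/* Once the skb already holds MAX_SKB_FRAGS fragments, chain a
		 * new skb onto its frag_list and continue filling that one.
		 */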
2482 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2483 new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2484 HNS3_RX_HEAD_SIZE);
2485 if (unlikely(!new_skb)) {
2486 netdev_err(ring->tqp->handle->kinfo.netdev,
2487 "alloc rx skb frag fail\n");
2488 return -ENXIO;
2489 }
2490 ring->frag_num = 0;
2491
2492 if (ring->tail_skb) {
2493 ring->tail_skb->next = new_skb;
2494 ring->tail_skb = new_skb;
2495 } else {
2496 skb_shinfo(skb)->frag_list = new_skb;
2497 ring->tail_skb = new_skb;
2498 }
2499 }
2500
2501 if (ring->tail_skb) {
2502 head_skb->truesize += hnae3_buf_size(ring);
2503 head_skb->data_len += le16_to_cpu(desc->rx.size);
2504 head_skb->len += le16_to_cpu(desc->rx.size);
2505 skb = ring->tail_skb;
2506 }
2507
2508 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2509 ring_ptr_move_fw(ring, next_to_clean);
2510 ring->pending_buf++;
2511 }
2512
2513 return 0;
2514}
2515
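/* Propagate the hardware GRO result to the stack: set the aggregated
 * segment count, the GSO type and the MSS so that tcp_gro_complete() can
 * finish the skb.
 */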
2516static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
2517 u32 bd_base_info)
2518{
2519 u16 gro_count;
2520 u32 l3_type;
2521
2522 gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
2523 HNS3_RXD_GRO_COUNT_S);
2524 /* if there is no HW GRO, do not set gro params */
2525 if (!gro_count)
2526 return;
2527
2528 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
2529 * to skb_shinfo(skb)->gso_segs
2530 */
2531 NAPI_GRO_CB(skb)->count = gro_count;
2532
2533 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2534 HNS3_RXD_L3ID_S);
2535 if (l3_type == HNS3_L3_TYPE_IPV4)
2536 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2537 else if (l3_type == HNS3_L3_TYPE_IPV6)
2538 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2539 else
2540 return;
2541
2542 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2543 HNS3_RXD_GRO_SIZE_M,
2544 HNS3_RXD_GRO_SIZE_S);
2545 if (skb_shinfo(skb)->gso_size)
2546 tcp_gro_complete(skb);
2547}
2548
2549static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2550 struct sk_buff *skb)
2551{
2552 struct hnae3_handle *handle = ring->tqp->handle;
2553 enum pkt_hash_types rss_type;
2554 struct hns3_desc *desc;
2555 int last_bd;
2556
2557	/* ring->next_to_clean already points to the first descriptor of the
2558	 * next packet here, so step back one BD to get the last descriptor.
2559	 */
2560 last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
2561 desc = &ring->desc[last_bd];
2562
2563 if (le32_to_cpu(desc->rx.rss_hash))
2564 rss_type = handle->kinfo.rss_type;
2565 else
2566 rss_type = PKT_HASH_TYPE_NONE;
2567
2568 skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
2569}
2570
2571static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2572 struct sk_buff **out_skb)
2573{
2574 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2575 enum hns3_pkt_l2t_type l2_frame_type;
2576 struct sk_buff *skb = ring->skb;
2577 struct hns3_desc_cb *desc_cb;
2578 struct hns3_desc *desc;
2579 u32 bd_base_info;
2580 u32 l234info;
2581 int length;
2582 int ret;
2583
2584 desc = &ring->desc[ring->next_to_clean];
2585 desc_cb = &ring->desc_cb[ring->next_to_clean];
2586
2587 prefetch(desc);
2588
2589 length = le16_to_cpu(desc->rx.size);
2590 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2591
2592 /* Check valid BD */
2593 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2594 return -ENXIO;
2595
2596 if (!skb)
2597 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2598
2599	/* Prefetch first cache line of first page.
2600	 * The idea is to cache a few bytes of the packet header. Our L1 cache
2601	 * line size is 64B, so we need to prefetch twice to cover 128B. Some
2602	 * parts, however, have larger caches with 128B Level 1 cache lines;
2603	 * in such a case a single prefetch is enough to bring in the relevant
2604	 * part of the header.
2605	 */
2606 prefetch(ring->va);
2607#if L1_CACHE_BYTES < 128
2608 prefetch(ring->va + L1_CACHE_BYTES);
2609#endif
2610
2611 if (!skb) {
2612 ret = hns3_alloc_skb(ring, length, ring->va);
2613 *out_skb = skb = ring->skb;
2614
2615 if (ret < 0) /* alloc buffer fail */
2616 return ret;
2617 if (ret > 0) { /* need add frag */
2618 ret = hns3_add_frag(ring, desc, &skb, false);
2619 if (ret)
2620 return ret;
2621
2622			/* The head data may change when GRO is enabled, so
2623			 * copy it in after the rest of the packet is received
2624			 */
2625 memcpy(skb->data, ring->va,
2626 ALIGN(ring->pull_len, sizeof(long)));
2627 }
2628 } else {
2629 ret = hns3_add_frag(ring, desc, &skb, true);
2630 if (ret)
2631 return ret;
2632
2633		/* The head data may change when GRO is enabled, so copy it in
2634		 * after the rest of the packet is received
2635		 */
2636 memcpy(skb->data, ring->va,
2637 ALIGN(ring->pull_len, sizeof(long)));
2638 }
2639
2640 l234info = le32_to_cpu(desc->rx.l234_info);
2641 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2642
2643	/* Per the hardware stripping strategy, the offloaded tag is stored
2644	 * in ot_vlan_tag for double-tagged frames and in vlan_tag for
2645	 * single-tagged frames.
2646	 */
2647 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2648 u16 vlan_tag;
2649
2650 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2651 __vlan_hwaccel_put_tag(skb,
2652 htons(ETH_P_8021Q),
2653 vlan_tag);
2654 }
2655
2656 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2657 u64_stats_update_begin(&ring->syncp);
2658 ring->stats.non_vld_descs++;
2659 u64_stats_update_end(&ring->syncp);
2660
2661 dev_kfree_skb_any(skb);
2662 return -EINVAL;
2663 }
2664
2665 if (unlikely((!desc->rx.pkt_len) ||
2666 (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2667 BIT(HNS3_RXD_L2E_B))))) {
2668 u64_stats_update_begin(&ring->syncp);
2669 if (l234info & BIT(HNS3_RXD_L2E_B))
2670 ring->stats.l2_err++;
2671 else
2672 ring->stats.err_pkt_len++;
2673 u64_stats_update_end(&ring->syncp);
2674
2675 dev_kfree_skb_any(skb);
2676 return -EFAULT;
2677 }
2678
2680 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2681 HNS3_RXD_DMAC_S);
2682 u64_stats_update_begin(&ring->syncp);
2683 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2684 ring->stats.rx_multicast++;
2685
2686 ring->stats.rx_pkts++;
2687 ring->stats.rx_bytes += skb->len;
2688 u64_stats_update_end(&ring->syncp);
2689
2690 ring->tqp_vector->rx_group.total_bytes += skb->len;
2691
2692 /* This is needed in order to enable forwarding support */
2693 hns3_set_gro_param(skb, l234info, bd_base_info);
2694
2695 hns3_rx_checksum(ring, skb, desc);
2696 *out_skb = skb;
2697 hns3_set_rx_skb_rss_type(ring, skb);
2698
2699 return 0;
2700}
2701
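/* Receive up to 'budget' packets from the ring, handing each completed skb
 * to 'rx_fn' and refilling RX buffers in batches along the way. Returns the
 * number of packets received.
 */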
2702int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2703		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2705{
2706#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2707 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2708 int recv_pkts, recv_bds, clean_count, err;
2709 int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
2710 struct sk_buff *skb = ring->skb;
2711 int num;
2712
2713 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2714	rmb(); /* Make sure num is read before any other data is touched */
2715
2716 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2717 num -= unused_count;
2718
2719 while (recv_pkts < budget && recv_bds < num) {
2720 /* Reuse or realloc buffers */
2721 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2722 hns3_nic_alloc_rx_buffers(ring,
2723 clean_count + unused_count);
2724 clean_count = 0;
2725 unused_count = hns3_desc_unused(ring) -
2726 ring->pending_buf;
2727 }
2728
2729 /* Poll one pkt */
2730 err = hns3_handle_rx_bd(ring, &skb);
2731 if (unlikely(!skb)) /* This fault cannot be repaired */
2732 goto out;
2733
2734		if (err == -ENXIO) { /* The FE bit of this packet has not arrived */
2735			goto out;
2736		} else if (unlikely(err)) { /* Skip this erroneous packet */
2737 recv_bds += ring->pending_buf;
2738 clean_count += ring->pending_buf;
2739 ring->skb = NULL;
2740 ring->pending_buf = 0;
2741 continue;
2742 }
2743
2744		/* Hand the packet up to the network stack */
2745 skb->protocol = eth_type_trans(skb, netdev);
2746 rx_fn(ring, skb);
2747 recv_bds += ring->pending_buf;
2748 clean_count += ring->pending_buf;
2749 ring->skb = NULL;
2750 ring->pending_buf = 0;
2751
2752 recv_pkts++;
2753 }
2754
2755out:
2756	/* Make sure all data has been written before submitting */
2757 if (clean_count + unused_count > 0)
2758 hns3_nic_alloc_rx_buffers(ring,
2759 clean_count + unused_count);
2760
2761 return recv_pkts;
2762}
2763
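/* Re-evaluate the interrupt coalescing (GL) level of a ring group from the
 * byte and packet rates observed since the last update. Returns true if the
 * GL value changed and must be written to hardware.
 */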
2764static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2765{
2766 struct hns3_enet_tqp_vector *tqp_vector =
2767 ring_group->ring->tqp_vector;
2768 enum hns3_flow_level_range new_flow_level;
2769 int packets_per_msecs;
2770 int bytes_per_msecs;
2771 u32 time_passed_ms;
2772 u16 new_int_gl;
2773
2774 if (!tqp_vector->last_jiffies)
2775 return false;
2776
2777 if (ring_group->total_packets == 0) {
2778 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2779 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2780 return true;
2781 }
2782
2783	/* Simple throttle rate management
2784 * 0-10MB/s lower (50000 ints/s)
2785 * 10-20MB/s middle (20000 ints/s)
2786 * 20-1249MB/s high (18000 ints/s)
2787 * > 40000pps ultra (8000 ints/s)
2788 */
2789 new_flow_level = ring_group->coal.flow_level;
2790 new_int_gl = ring_group->coal.int_gl;
2791 time_passed_ms =
2792 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2793
2794 if (!time_passed_ms)
2795 return false;
2796
2797 do_div(ring_group->total_packets, time_passed_ms);
2798 packets_per_msecs = ring_group->total_packets;
2799
2800 do_div(ring_group->total_bytes, time_passed_ms);
2801 bytes_per_msecs = ring_group->total_bytes;
2802
2803#define HNS3_RX_LOW_BYTE_RATE 10000
2804#define HNS3_RX_MID_BYTE_RATE 20000
2805
2806 switch (new_flow_level) {
2807 case HNS3_FLOW_LOW:
2808 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2809 new_flow_level = HNS3_FLOW_MID;
2810 break;
2811 case HNS3_FLOW_MID:
2812 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2813 new_flow_level = HNS3_FLOW_HIGH;
2814 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2815 new_flow_level = HNS3_FLOW_LOW;
2816 break;
2817 case HNS3_FLOW_HIGH:
2818 case HNS3_FLOW_ULTRA:
2819 default:
2820 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2821 new_flow_level = HNS3_FLOW_MID;
2822 break;
2823 }
2824
2825#define HNS3_RX_ULTRA_PACKET_RATE 40
2826
2827 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2828 &tqp_vector->rx_group == ring_group)
2829 new_flow_level = HNS3_FLOW_ULTRA;
2830
2831 switch (new_flow_level) {
2832 case HNS3_FLOW_LOW:
2833 new_int_gl = HNS3_INT_GL_50K;
2834 break;
2835 case HNS3_FLOW_MID:
2836 new_int_gl = HNS3_INT_GL_20K;
2837 break;
2838 case HNS3_FLOW_HIGH:
2839 new_int_gl = HNS3_INT_GL_18K;
2840 break;
2841 case HNS3_FLOW_ULTRA:
2842 new_int_gl = HNS3_INT_GL_8K;
2843 break;
2844 default:
2845 break;
2846 }
2847
2848 ring_group->total_bytes = 0;
2849 ring_group->total_packets = 0;
2850 ring_group->coal.flow_level = new_flow_level;
2851 if (new_int_gl != ring_group->coal.int_gl) {
2852 ring_group->coal.int_gl = new_int_gl;
2853 return true;
2854 }
2855 return false;
2856}
2857
2858static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2859{
2860 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2861 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2862 bool rx_update, tx_update;
2863
2864 /* update param every 1000ms */
2865 if (time_before(jiffies,
2866 tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
2867 return;
2868
2869 if (rx_group->coal.gl_adapt_enable) {
2870 rx_update = hns3_get_new_int_gl(rx_group);
2871 if (rx_update)
2872 hns3_set_vector_coalesce_rx_gl(tqp_vector,
2873 rx_group->coal.int_gl);
2874 }
2875
2876 if (tx_group->coal.gl_adapt_enable) {
2877 tx_update = hns3_get_new_int_gl(tx_group);
2878 if (tx_update)
2879 hns3_set_vector_coalesce_tx_gl(tqp_vector,
2880 tx_group->coal.int_gl);
2881 }
2882
2883 tqp_vector->last_jiffies = jiffies;
2884}
2885
2886static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2887{
2888 struct hns3_nic_priv *priv = netdev_priv(napi->dev);
2889 struct hns3_enet_ring *ring;
2890 int rx_pkt_total = 0;
2891
2892 struct hns3_enet_tqp_vector *tqp_vector =
2893 container_of(napi, struct hns3_enet_tqp_vector, napi);
2894 bool clean_complete = true;
2895 int rx_budget;
2896
2897 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
2898 napi_complete(napi);
2899 return 0;
2900 }
2901
2902 /* Since the actual Tx work is minimal, we can give the Tx a larger
2903 * budget and be more aggressive about cleaning up the Tx descriptors.
2904 */
2905 hns3_for_each_ring(ring, tqp_vector->tx_group)
2906 hns3_clean_tx_ring(ring);
2907
2908	/* make sure the rx ring budget is not smaller than 1 */
2909 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2910
2911 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2912 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2913 hns3_rx_skb);
2914
2915 if (rx_cleaned >= rx_budget)
2916 clean_complete = false;
2917
2918 rx_pkt_total += rx_cleaned;
2919 }
2920
2921 tqp_vector->rx_group.total_packets += rx_pkt_total;
2922
2923 if (!clean_complete)
2924 return budget;
2925
2926 if (napi_complete(napi) &&
2927 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
2928 hns3_update_new_int_gl(tqp_vector);
2929 hns3_mask_vector_irq(tqp_vector, 1);
2930 }
2931
2932 return rx_pkt_total;
2933}
2934
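/* Build a linked chain describing all TX and RX rings attached to this
 * vector so the chain can be passed to the AE layer for ring-to-vector
 * mapping.
 */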
2935static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2936 struct hnae3_ring_chain_node *head)
2937{
2938 struct pci_dev *pdev = tqp_vector->handle->pdev;
2939 struct hnae3_ring_chain_node *cur_chain = head;
2940 struct hnae3_ring_chain_node *chain;
2941 struct hns3_enet_ring *tx_ring;
2942 struct hns3_enet_ring *rx_ring;
2943
2944 tx_ring = tqp_vector->tx_group.ring;
2945 if (tx_ring) {
2946 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2947 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2948 HNAE3_RING_TYPE_TX);
2949 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2950 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2951
2952 cur_chain->next = NULL;
2953
2954 while (tx_ring->next) {
2955 tx_ring = tx_ring->next;
2956
2957 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2958 GFP_KERNEL);
2959 if (!chain)
2960 goto err_free_chain;
2961
2962 cur_chain->next = chain;
2963 chain->tqp_index = tx_ring->tqp->tqp_index;
2964 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2965 HNAE3_RING_TYPE_TX);
2966 hnae3_set_field(chain->int_gl_idx,
2967 HNAE3_RING_GL_IDX_M,
2968 HNAE3_RING_GL_IDX_S,
2969 HNAE3_RING_GL_TX);
2970
2971 cur_chain = chain;
2972 }
2973 }
2974
2975 rx_ring = tqp_vector->rx_group.ring;
2976 if (!tx_ring && rx_ring) {
2977 cur_chain->next = NULL;
2978 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2979 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2980 HNAE3_RING_TYPE_RX);
2981 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2982 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2983
2984 rx_ring = rx_ring->next;
2985 }
2986
2987 while (rx_ring) {
2988 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2989 if (!chain)
2990 goto err_free_chain;
2991
2992 cur_chain->next = chain;
2993 chain->tqp_index = rx_ring->tqp->tqp_index;
2994 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2995 HNAE3_RING_TYPE_RX);
2996 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2997 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2998
2999 cur_chain = chain;
3000
3001 rx_ring = rx_ring->next;
3002 }
3003
3004 return 0;
3005
3006err_free_chain:
3007 cur_chain = head->next;
3008 while (cur_chain) {
3009 chain = cur_chain->next;
3010 devm_kfree(&pdev->dev, cur_chain);
3011 cur_chain = chain;
3012 }
3013 head->next = NULL;
3014
3015 return -ENOMEM;
3016}
3017
3018static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3019 struct hnae3_ring_chain_node *head)
3020{
3021 struct pci_dev *pdev = tqp_vector->handle->pdev;
3022 struct hnae3_ring_chain_node *chain_tmp, *chain;
3023
3024 chain = head->next;
3025
3026 while (chain) {
3027 chain_tmp = chain->next;
3028 devm_kfree(&pdev->dev, chain);
3029 chain = chain_tmp;
3030 }
3031}
3032
3033static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3034 struct hns3_enet_ring *ring)
3035{
3036 ring->next = group->ring;
3037 group->ring = ring;
3038
3039 group->count++;
3040}
3041
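/* Spread the vectors' affinity masks across the CPUs local to the device's
 * NUMA node.
 */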
3042static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3043{
3044 struct pci_dev *pdev = priv->ae_handle->pdev;
3045 struct hns3_enet_tqp_vector *tqp_vector;
3046 int num_vectors = priv->vector_num;
3047 int numa_node;
3048 int vector_i;
3049
3050 numa_node = dev_to_node(&pdev->dev);
3051
3052 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3053 tqp_vector = &priv->tqp_vector[vector_i];
3054 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3055 &tqp_vector->affinity_mask);
3056 }
3057}
3058
3059static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3060{
3061 struct hnae3_ring_chain_node vector_ring_chain;
3062 struct hnae3_handle *h = priv->ae_handle;
3063 struct hns3_enet_tqp_vector *tqp_vector;
3064 int ret = 0;
3065 int i;
3066
3067 hns3_nic_set_cpumask(priv);
3068
3069 for (i = 0; i < priv->vector_num; i++) {
3070 tqp_vector = &priv->tqp_vector[i];
3071 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3072 tqp_vector->num_tqps = 0;
3073 }
3074
3075 for (i = 0; i < h->kinfo.num_tqps; i++) {
3076 u16 vector_i = i % priv->vector_num;
3077 u16 tqp_num = h->kinfo.num_tqps;
3078
3079 tqp_vector = &priv->tqp_vector[vector_i];
3080
3081 hns3_add_ring_to_group(&tqp_vector->tx_group,
3082 priv->ring_data[i].ring);
3083
3084 hns3_add_ring_to_group(&tqp_vector->rx_group,
3085 priv->ring_data[i + tqp_num].ring);
3086
3087 priv->ring_data[i].ring->tqp_vector = tqp_vector;
3088 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3089 tqp_vector->num_tqps++;
3090 }
3091
3092 for (i = 0; i < priv->vector_num; i++) {
3093 tqp_vector = &priv->tqp_vector[i];
3094
3095 tqp_vector->rx_group.total_bytes = 0;
3096 tqp_vector->rx_group.total_packets = 0;
3097 tqp_vector->tx_group.total_bytes = 0;
3098 tqp_vector->tx_group.total_packets = 0;
3099 tqp_vector->handle = h;
3100
3101 ret = hns3_get_vector_ring_chain(tqp_vector,
3102 &vector_ring_chain);
3103 if (ret)
3104 goto map_ring_fail;
3105
3106 ret = h->ae_algo->ops->map_ring_to_vector(h,
3107 tqp_vector->vector_irq, &vector_ring_chain);
3108
3109 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3110
3111 if (ret)
3112 goto map_ring_fail;
3113
3114 netif_napi_add(priv->netdev, &tqp_vector->napi,
3115 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3116 }
3117
3118 return 0;
3119
3120map_ring_fail:
3121 while (i--)
3122 netif_napi_del(&priv->tqp_vector[i].napi);
3123
3124 return ret;
3125}
3126
3127static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3128{
3129#define HNS3_VECTOR_PF_MAX_NUM 64
3130
3131 struct hnae3_handle *h = priv->ae_handle;
3132 struct hns3_enet_tqp_vector *tqp_vector;
3133 struct hnae3_vector_info *vector;
3134 struct pci_dev *pdev = h->pdev;
3135 u16 tqp_num = h->kinfo.num_tqps;
3136 u16 vector_num;
3137 int ret = 0;
3138 u16 i;
3139
3140	/* RSS size, the number of online CPUs and vector_num should match */
3141	/* Should consider 2P/4P systems later */
3142 vector_num = min_t(u16, num_online_cpus(), tqp_num);
3143 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3144
3145 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3146 GFP_KERNEL);
3147 if (!vector)
3148 return -ENOMEM;
3149
3150 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3151
3152 priv->vector_num = vector_num;
3153 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3154 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3155 GFP_KERNEL);
3156 if (!priv->tqp_vector) {
3157 ret = -ENOMEM;
3158 goto out;
3159 }
3160
3161 for (i = 0; i < priv->vector_num; i++) {
3162 tqp_vector = &priv->tqp_vector[i];
3163 tqp_vector->idx = i;
3164 tqp_vector->mask_addr = vector[i].io_addr;
3165 tqp_vector->vector_irq = vector[i].vector;
3166 hns3_vector_gl_rl_init(tqp_vector, priv);
3167 }
3168
3169out:
3170 devm_kfree(&pdev->dev, vector);
3171 return ret;
3172}
3173
3174static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3175{
3176 group->ring = NULL;
3177 group->count = 0;
3178}
3179
3180static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3181{
3182	struct hnae3_ring_chain_node vector_ring_chain;