// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#define HCLGE_NAME "hclge"

#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90

#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500

#define HCLGE_LINK_STATUS_MS 10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it sends
 * the descriptors to the queue, cleans the queue, etc.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

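/* Update MAC statistics on devices whose firmware does not report the
 * statistic register count: read a fixed set of HCLGE_MAC_CMD_NUM
 * descriptors and accumulate the 64-bit counters into hdev->mac_stats.
 */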
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is contiguous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

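/* Update MAC statistics on devices whose firmware reports the statistic
 * register count (dev_specs.mac_stats_num): allocate just enough
 * descriptors for that count and accumulate the counters.
 */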
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is contiguous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

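/* Count how many entries of strs[] this device actually supports: an entry
 * is valid only if its required register count does not exceed the count
 * reported by firmware.
 */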
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
				   HNAE3_SUPPORT_PHY_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
				   HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		if (hdev->ae_dev->dev_specs.hilink_version !=
		    HCLGE_HILINK_H60) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK 0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

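/* Poll the function status from firmware until it reports that PF reset is
 * done, bounded by HCLGE_QUERY_MAX_CNT attempts.
 */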
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

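/* Translate the speed encoding used by firmware into the driver's
 * HCLGE_MAC_SPEED_* value.
 */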
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

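/* Map each HCLGE_MAC_SPEED_* value to the ability bit(s) firmware uses to
 * advertise support for that speed.
 */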
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

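/* Rebuild the FEC link modes in mac->supported from mac->fec_ability. */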
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}

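/* Tables mapping speed-ability bits to the matching ethtool link modes for
 * SR, LR, CR and KR media respectively.
 */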
static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};

static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
		if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
		if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
		if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
		if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, there is no need to derive
	 * it from the speed.
	 */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

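/* Return the highest speed present in speed_ability, checking from the
 * fastest down to the slowest; default to 1G if nothing matches.
 */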
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BITS)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BITS)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BITS)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

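/* Decode the two configuration descriptors returned by
 * HCLGE_OPC_GET_CFG_PARAM into the hclge_cfg structure.
 */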
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT 4096
#define SPEED_ABILITY_EXT_SHIFT 8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

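	/* The upper bits of the MAC address come from param[3]; shift them
	 * above the low 32 bits (done as 31 then 1) and merge.
	 */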
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M stores the log2 of the PF max rss size
	 * (a power of 2) rather than the value itself, which is more
	 * flexible for future changes and expansions. The VF max rss size
	 * field is HCLGE_CFG_RSS_SIZE_S, and a PF field of 0 does not make
	 * sense; in that case PF and VF share the same max rss size field:
	 * HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

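/* Fallback device specifications used when the firmware is too old to
 * report them (devices below version V3).
 */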
1326 | static void hclge_set_default_dev_specs(struct hclge_dev *hdev) |
1327 | { |
1328 | #define HCLGE_MAX_NON_TSO_BD_NUM 8U |
1329 | |
1330 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev: hdev->pdev); |
1331 | |
1332 | ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; |
1333 | ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; |
1334 | ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; |
1335 | ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; |
1336 | ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; |
1337 | ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; |
1338 | ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; |
1339 | ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; |
1340 | ae_dev->dev_specs.tnl_num = 0; |
1341 | } |
1342 | |
1343 | static void hclge_parse_dev_specs(struct hclge_dev *hdev, |
1344 | struct hclge_desc *desc) |
1345 | { |
1346 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev: hdev->pdev); |
1347 | struct hclge_dev_specs_0_cmd *req0; |
1348 | struct hclge_dev_specs_1_cmd *req1; |
1349 | |
1350 | req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; |
1351 | req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data; |
1352 | |
1353 | ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; |
1354 | ae_dev->dev_specs.rss_ind_tbl_size = |
1355 | le16_to_cpu(req0->rss_ind_tbl_size); |
1356 | ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); |
1357 | ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); |
1358 | ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); |
1359 | ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); |
1360 | ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); |
1361 | ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); |
1362 | ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); |
1363 | ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); |
1364 | ae_dev->dev_specs.tnl_num = req1->tnl_num; |
1365 | ae_dev->dev_specs.hilink_version = req1->hilink_version; |
1366 | } |
1367 | |
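/* Firmware reports zero for any spec field it does not support, so
 * backfill such fields with the driver defaults before they are used.
 */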
1368 | static void hclge_check_dev_specs(struct hclge_dev *hdev) |
1369 | { |
1370 | struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; |
1371 | |
1372 | if (!dev_specs->max_non_tso_bd_num) |
1373 | dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; |
1374 | if (!dev_specs->rss_ind_tbl_size) |
1375 | dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; |
1376 | if (!dev_specs->rss_key_size) |
1377 | dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; |
1378 | if (!dev_specs->max_tm_rate) |
1379 | dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; |
1380 | if (!dev_specs->max_qset_num) |
1381 | dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; |
1382 | if (!dev_specs->max_int_gl) |
1383 | dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; |
1384 | if (!dev_specs->max_frm_size) |
1385 | dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; |
1386 | if (!dev_specs->umv_size) |
1387 | dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; |
1388 | } |
1389 | |
1390 | static int hclge_query_mac_stats_num(struct hclge_dev *hdev) |
1391 | { |
1392 | u32 reg_num = 0; |
1393 | int ret; |
1394 | |
	ret = hclge_mac_query_reg_num(hdev, &reg_num);
1396 | if (ret && ret != -EOPNOTSUPP) |
1397 | return ret; |
1398 | |
1399 | hdev->ae_dev->dev_specs.mac_stats_num = reg_num; |
1400 | return 0; |
1401 | } |
1402 | |
1403 | static int hclge_query_dev_specs(struct hclge_dev *hdev) |
1404 | { |
1405 | struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; |
1406 | int ret; |
1407 | int i; |
1408 | |
1409 | ret = hclge_query_mac_stats_num(hdev); |
1410 | if (ret) |
1411 | return ret; |
1412 | |
1413 | /* set default specifications as devices lower than version V3 do not |
1414 | * support querying specifications from firmware. |
1415 | */ |
1416 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { |
1417 | hclge_set_default_dev_specs(hdev); |
1418 | return 0; |
1419 | } |
1420 | |
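	/* all descriptors except the last carry the NEXT flag, so the
	 * firmware treats the whole array as one multi-BD query
	 */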
1421 | for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { |
1422 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, |
1423 | true); |
1424 | desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
1425 | } |
1426 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); |
1427 | |
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1429 | if (ret) |
1430 | return ret; |
1431 | |
1432 | hclge_parse_dev_specs(hdev, desc); |
1433 | hclge_check_dev_specs(hdev); |
1434 | |
1435 | return 0; |
1436 | } |
1437 | |
1438 | static int hclge_get_cap(struct hclge_dev *hdev) |
1439 | { |
1440 | int ret; |
1441 | |
1442 | ret = hclge_query_function_status(hdev); |
1443 | if (ret) { |
1444 | dev_err(&hdev->pdev->dev, |
1445 | "query function status error %d.\n" , ret); |
1446 | return ret; |
1447 | } |
1448 | |
1449 | /* get pf resource */ |
1450 | return hclge_query_pf_resource(hdev); |
1451 | } |
1452 | |
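/* A kdump kernel runs with very little memory, so keep the queue and
 * descriptor counts minimal.
 */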
1453 | static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) |
1454 | { |
1455 | #define HCLGE_MIN_TX_DESC 64 |
1456 | #define HCLGE_MIN_RX_DESC 64 |
1457 | |
1458 | if (!is_kdump_kernel()) |
1459 | return; |
1460 | |
1461 | dev_info(&hdev->pdev->dev, |
1462 | "Running kdump kernel. Using minimal resources\n" ); |
1463 | |
	/* minimum number of queue pairs equals the number of vports */
1465 | hdev->num_tqps = hdev->num_req_vfs + 1; |
1466 | hdev->num_tx_desc = HCLGE_MIN_TX_DESC; |
1467 | hdev->num_rx_desc = HCLGE_MIN_RX_DESC; |
1468 | } |
1469 | |
1470 | static void hclge_init_tc_config(struct hclge_dev *hdev) |
1471 | { |
1472 | unsigned int i; |
1473 | |
1474 | if (hdev->tc_max > HNAE3_MAX_TC || |
1475 | hdev->tc_max < 1) { |
1476 | dev_warn(&hdev->pdev->dev, "TC num = %u.\n" , |
1477 | hdev->tc_max); |
1478 | hdev->tc_max = 1; |
1479 | } |
1480 | |
1481 | /* Dev does not support DCB */ |
1482 | if (!hnae3_dev_dcb_supported(hdev)) { |
1483 | hdev->tc_max = 1; |
1484 | hdev->pfc_max = 0; |
1485 | } else { |
1486 | hdev->pfc_max = hdev->tc_max; |
1487 | } |
1488 | |
1489 | hdev->tm_info.num_tc = 1; |
1490 | |
	/* Currently does not support non-contiguous TCs */
1492 | for (i = 0; i < hdev->tm_info.num_tc; i++) |
1493 | hnae3_set_bit(hdev->hw_tc_map, i, 1); |
1494 | |
1495 | hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; |
1496 | } |
1497 | |
1498 | static int hclge_configure(struct hclge_dev *hdev) |
1499 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1501 | struct hclge_cfg cfg; |
1502 | int ret; |
1503 | |
	ret = hclge_get_cfg(hdev, &cfg);
1505 | if (ret) |
1506 | return ret; |
1507 | |
1508 | hdev->base_tqp_pid = 0; |
1509 | hdev->vf_rss_size_max = cfg.vf_rss_size_max; |
1510 | hdev->pf_rss_size_max = cfg.pf_rss_size_max; |
1511 | hdev->rx_buf_len = cfg.rx_buf_len; |
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1513 | hdev->hw.mac.media_type = cfg.media_type; |
1514 | hdev->hw.mac.phy_addr = cfg.phy_addr; |
1515 | hdev->num_tx_desc = cfg.tqp_desc_num; |
1516 | hdev->num_rx_desc = cfg.tqp_desc_num; |
1517 | hdev->tm_info.num_pg = 1; |
1518 | hdev->tc_max = cfg.tc_num; |
1519 | hdev->tm_info.hw_pfc_map = 0; |
1520 | if (cfg.umv_space) |
1521 | hdev->wanted_umv_size = cfg.umv_space; |
1522 | else |
1523 | hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; |
1524 | hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; |
1525 | hdev->gro_en = true; |
1526 | if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) |
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1528 | |
1529 | if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { |
1530 | hdev->fd_en = true; |
1531 | hdev->fd_active_type = HCLGE_FD_RULE_NONE; |
1532 | } |
1533 | |
	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1537 | cfg.default_speed, ret); |
1538 | return ret; |
1539 | } |
1540 | |
	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1544 | |
1545 | hclge_init_tc_config(hdev); |
1546 | hclge_init_kdump_kernel_config(hdev); |
1547 | |
1548 | return ret; |
1549 | } |
1550 | |
1551 | static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, |
1552 | u16 tso_mss_max) |
1553 | { |
1554 | struct hclge_cfg_tso_status_cmd *req; |
1555 | struct hclge_desc desc; |
1556 | |
1557 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); |
1558 | |
1559 | req = (struct hclge_cfg_tso_status_cmd *)desc.data; |
1560 | req->tso_mss_min = cpu_to_le16(tso_mss_min); |
1561 | req->tso_mss_max = cpu_to_le16(tso_mss_max); |
1562 | |
	return hclge_cmd_send(&hdev->hw, &desc, 1);
1564 | } |
1565 | |
1566 | static int hclge_config_gro(struct hclge_dev *hdev) |
1567 | { |
1568 | struct hclge_cfg_gro_status_cmd *req; |
1569 | struct hclge_desc desc; |
1570 | int ret; |
1571 | |
1572 | if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) |
1573 | return 0; |
1574 | |
1575 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); |
1576 | req = (struct hclge_cfg_gro_status_cmd *)desc.data; |
1577 | |
1578 | req->gro_en = hdev->gro_en ? 1 : 0; |
1579 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);
1584 | |
1585 | return ret; |
1586 | } |
1587 | |
1588 | static int hclge_alloc_tqps(struct hclge_dev *hdev) |
1589 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1591 | struct hclge_comm_tqp *tqp; |
1592 | int i; |
1593 | |
1594 | hdev->htqp = devm_kcalloc(dev: &hdev->pdev->dev, n: hdev->num_tqps, |
1595 | size: sizeof(struct hclge_comm_tqp), GFP_KERNEL); |
1596 | if (!hdev->htqp) |
1597 | return -ENOMEM; |
1598 | |
1599 | tqp = hdev->htqp; |
1600 | |
1601 | for (i = 0; i < hdev->num_tqps; i++) { |
1602 | tqp->dev = &hdev->pdev->dev; |
1603 | tqp->index = i; |
1604 | |
1605 | tqp->q.ae_algo = &ae_algo; |
1606 | tqp->q.buf_size = hdev->rx_buf_len; |
1607 | tqp->q.tx_desc_num = hdev->num_tx_desc; |
1608 | tqp->q.rx_desc_num = hdev->num_rx_desc; |
1609 | |
1610 | /* need an extended offset to configure queues >= |
1611 | * HCLGE_TQP_MAX_SIZE_DEV_V2 |
1612 | */ |
1613 | if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) |
1614 | tqp->q.io_base = hdev->hw.hw.io_base + |
1615 | HCLGE_TQP_REG_OFFSET + |
1616 | i * HCLGE_TQP_REG_SIZE; |
1617 | else |
1618 | tqp->q.io_base = hdev->hw.hw.io_base + |
1619 | HCLGE_TQP_REG_OFFSET + |
1620 | HCLGE_TQP_EXT_REG_OFFSET + |
1621 | (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * |
1622 | HCLGE_TQP_REG_SIZE; |
1623 | |
1624 | /* when device supports tx push and has device memory, |
1625 | * the queue can execute push mode or doorbell mode on |
1626 | * device memory. |
1627 | */ |
1628 | if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) |
1629 | tqp->q.mem_base = hdev->hw.hw.mem_base + |
1630 | HCLGE_TQP_MEM_OFFSET(hdev, i); |
1631 | |
1632 | tqp++; |
1633 | } |
1634 | |
1635 | return 0; |
1636 | } |
1637 | |
1638 | static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, |
1639 | u16 tqp_pid, u16 tqp_vid, bool is_pf) |
1640 | { |
1641 | struct hclge_tqp_map_cmd *req; |
1642 | struct hclge_desc desc; |
1643 | int ret; |
1644 | |
1645 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); |
1646 | |
1647 | req = (struct hclge_tqp_map_cmd *)desc.data; |
1648 | req->tqp_id = cpu_to_le16(tqp_pid); |
1649 | req->tqp_vf = func_id; |
1650 | req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; |
1651 | if (!is_pf) |
1652 | req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; |
1653 | req->tqp_vid = cpu_to_le16(tqp_vid); |
1654 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1658 | |
1659 | return ret; |
1660 | } |
1661 | |
1662 | static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) |
1663 | { |
1664 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; |
1665 | struct hclge_dev *hdev = vport->back; |
1666 | int i, alloced; |
1667 | |
1668 | for (i = 0, alloced = 0; i < hdev->num_tqps && |
1669 | alloced < num_tqps; i++) { |
1670 | if (!hdev->htqp[i].alloced) { |
1671 | hdev->htqp[i].q.handle = &vport->nic; |
1672 | hdev->htqp[i].q.tqp_index = alloced; |
1673 | hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; |
1674 | hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; |
1675 | kinfo->tqp[alloced] = &hdev->htqp[i].q; |
1676 | hdev->htqp[i].alloced = true; |
1677 | alloced++; |
1678 | } |
1679 | } |
1680 | vport->alloc_tqps = alloced; |
1681 | kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, |
1682 | vport->alloc_tqps / hdev->tm_info.num_tc); |
1683 | |
	/* ensure one to one mapping between irq and queue by default */
1685 | kinfo->rss_size = min_t(u16, kinfo->rss_size, |
1686 | (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); |
1687 | |
1688 | return 0; |
1689 | } |
1690 | |
1691 | static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, |
1692 | u16 num_tx_desc, u16 num_rx_desc) |
1693 | |
1694 | { |
1695 | struct hnae3_handle *nic = &vport->nic; |
1696 | struct hnae3_knic_private_info *kinfo = &nic->kinfo; |
1697 | struct hclge_dev *hdev = vport->back; |
1698 | int ret; |
1699 | |
1700 | kinfo->num_tx_desc = num_tx_desc; |
1701 | kinfo->num_rx_desc = num_rx_desc; |
1702 | |
1703 | kinfo->rx_buf_len = hdev->rx_buf_len; |
1704 | kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; |
1705 | |
1706 | kinfo->tqp = devm_kcalloc(dev: &hdev->pdev->dev, n: num_tqps, |
1707 | size: sizeof(struct hnae3_queue *), GFP_KERNEL); |
1708 | if (!kinfo->tqp) |
1709 | return -ENOMEM; |
1710 | |
1711 | ret = hclge_assign_tqp(vport, num_tqps); |
1712 | if (ret) |
1713 | dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n" , ret); |
1714 | |
1715 | return ret; |
1716 | } |
1717 | |
1718 | static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, |
1719 | struct hclge_vport *vport) |
1720 | { |
1721 | struct hnae3_handle *nic = &vport->nic; |
1722 | struct hnae3_knic_private_info *kinfo; |
1723 | u16 i; |
1724 | |
1725 | kinfo = &nic->kinfo; |
1726 | for (i = 0; i < vport->alloc_tqps; i++) { |
1727 | struct hclge_comm_tqp *q = |
1728 | container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); |
1729 | bool is_pf; |
1730 | int ret; |
1731 | |
1732 | is_pf = !(vport->vport_id); |
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
1735 | if (ret) |
1736 | return ret; |
1737 | } |
1738 | |
1739 | return 0; |
1740 | } |
1741 | |
1742 | static int hclge_map_tqp(struct hclge_dev *hdev) |
1743 | { |
1744 | struct hclge_vport *vport = hdev->vport; |
1745 | u16 i, num_vport; |
1746 | |
1747 | num_vport = hdev->num_req_vfs + 1; |
1748 | for (i = 0; i < num_vport; i++) { |
1749 | int ret; |
1750 | |
1751 | ret = hclge_map_tqp_to_vport(hdev, vport); |
1752 | if (ret) |
1753 | return ret; |
1754 | |
1755 | vport++; |
1756 | } |
1757 | |
1758 | return 0; |
1759 | } |
1760 | |
1761 | static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) |
1762 | { |
1763 | struct hnae3_handle *nic = &vport->nic; |
1764 | struct hclge_dev *hdev = vport->back; |
1765 | int ret; |
1766 | |
1767 | nic->pdev = hdev->pdev; |
1768 | nic->ae_algo = &ae_algo; |
1769 | nic->numa_node_mask = hdev->numa_node_mask; |
1770 | nic->kinfo.io_base = hdev->hw.hw.io_base; |
1771 | |
	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1776 | |
1777 | return ret; |
1778 | } |
1779 | |
1780 | static int hclge_alloc_vport(struct hclge_dev *hdev) |
1781 | { |
1782 | struct pci_dev *pdev = hdev->pdev; |
1783 | struct hclge_vport *vport; |
1784 | u32 tqp_main_vport; |
1785 | u32 tqp_per_vport; |
1786 | int num_vport, i; |
1787 | int ret; |
1788 | |
1789 | /* We need to alloc a vport for main NIC of PF */ |
1790 | num_vport = hdev->num_req_vfs + 1; |
1791 | |
1792 | if (hdev->num_tqps < num_vport) { |
1793 | dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)" , |
1794 | hdev->num_tqps, num_vport); |
1795 | return -EINVAL; |
1796 | } |
1797 | |
	/* Alloc the same number of TQPs for every vport; the main vport
	 * also takes the remainder, as in the example below.
	 */
1799 | tqp_per_vport = hdev->num_tqps / num_vport; |
1800 | tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; |
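	/* e.g. with 18 TQPs and 4 vports: tqp_per_vport = 4 and the main
	 * vport (vport 0) gets 4 + 18 % 4 = 6 TQPs
	 */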
1801 | |
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1803 | GFP_KERNEL); |
1804 | if (!vport) |
1805 | return -ENOMEM; |
1806 | |
1807 | hdev->vport = vport; |
1808 | hdev->num_alloc_vport = num_vport; |
1809 | |
1810 | if (IS_ENABLED(CONFIG_PCI_IOV)) |
1811 | hdev->num_alloc_vfs = hdev->num_req_vfs; |
1812 | |
1813 | for (i = 0; i < num_vport; i++) { |
1814 | vport->back = hdev; |
1815 | vport->vport_id = i; |
1816 | vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; |
1817 | vport->mps = HCLGE_MAC_DEFAULT_FRAME; |
1818 | vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; |
1819 | vport->port_base_vlan_cfg.tbl_sta = true; |
1820 | vport->rxvlan_cfg.rx_vlan_offload_en = true; |
1821 | vport->req_vlan_fltr_en = true; |
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
1825 | spin_lock_init(&vport->mac_list_lock); |
1826 | |
		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
1835 | return ret; |
1836 | } |
1837 | |
1838 | vport++; |
1839 | } |
1840 | |
1841 | return 0; |
1842 | } |
1843 | |
1844 | static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, |
1845 | struct hclge_pkt_buf_alloc *buf_alloc) |
1846 | { |
	/* Tx buffer size is in units of 128 bytes */
1848 | #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 |
1849 | #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) |
1850 | struct hclge_tx_buff_alloc_cmd *req; |
1851 | struct hclge_desc desc; |
1852 | int ret; |
1853 | u8 i; |
1854 | |
1855 | req = (struct hclge_tx_buff_alloc_cmd *)desc.data; |
1856 | |
1857 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); |
1858 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1859 | u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; |
1860 | |
1861 | req->tx_pkt_buff[i] = |
1862 | cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | |
1863 | HCLGE_BUF_SIZE_UPDATE_EN_MSK); |
1864 | } |
1865 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1869 | ret); |
1870 | |
1871 | return ret; |
1872 | } |
1873 | |
1874 | static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, |
1875 | struct hclge_pkt_buf_alloc *buf_alloc) |
1876 | { |
1877 | int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); |
1878 | |
1879 | if (ret) |
1880 | dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n" , ret); |
1881 | |
1882 | return ret; |
1883 | } |
1884 | |
1885 | static u32 hclge_get_tc_num(struct hclge_dev *hdev) |
1886 | { |
1887 | unsigned int i; |
1888 | u32 cnt = 0; |
1889 | |
1890 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) |
1891 | if (hdev->hw_tc_map & BIT(i)) |
1892 | cnt++; |
1893 | return cnt; |
1894 | } |
1895 | |
1896 | /* Get the number of pfc enabled TCs, which have private buffer */ |
1897 | static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, |
1898 | struct hclge_pkt_buf_alloc *buf_alloc) |
1899 | { |
1900 | struct hclge_priv_buf *priv; |
1901 | unsigned int i; |
1902 | int cnt = 0; |
1903 | |
1904 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1905 | priv = &buf_alloc->priv_buf[i]; |
1906 | if ((hdev->tm_info.hw_pfc_map & BIT(i)) && |
1907 | priv->enable) |
1908 | cnt++; |
1909 | } |
1910 | |
1911 | return cnt; |
1912 | } |
1913 | |
1914 | /* Get the number of pfc disabled TCs, which have private buffer */ |
1915 | static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, |
1916 | struct hclge_pkt_buf_alloc *buf_alloc) |
1917 | { |
1918 | struct hclge_priv_buf *priv; |
1919 | unsigned int i; |
1920 | int cnt = 0; |
1921 | |
1922 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1923 | priv = &buf_alloc->priv_buf[i]; |
1924 | if (hdev->hw_tc_map & BIT(i) && |
1925 | !(hdev->tm_info.hw_pfc_map & BIT(i)) && |
1926 | priv->enable) |
1927 | cnt++; |
1928 | } |
1929 | |
1930 | return cnt; |
1931 | } |
1932 | |
1933 | static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
1934 | { |
1935 | struct hclge_priv_buf *priv; |
1936 | u32 rx_priv = 0; |
1937 | int i; |
1938 | |
1939 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1940 | priv = &buf_alloc->priv_buf[i]; |
1941 | if (priv->enable) |
1942 | rx_priv += priv->buf_size; |
1943 | } |
1944 | return rx_priv; |
1945 | } |
1946 | |
1947 | static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) |
1948 | { |
1949 | u32 i, total_tx_size = 0; |
1950 | |
1951 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) |
1952 | total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; |
1953 | |
1954 | return total_tx_size; |
1955 | } |
1956 | |
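/* Check whether the rx buffer left after private allocation can hold the
 * shared buffer: the required shared size is the larger of the DCB/non-DCB
 * minimum and the per-TC requirement, rounded up to HCLGE_BUF_SIZE_UNIT.
 * On success, fill in the shared buffer size, its high/low waterlines and
 * the per-TC thresholds.
 */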
1957 | static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, |
1958 | struct hclge_pkt_buf_alloc *buf_alloc, |
1959 | u32 rx_all) |
1960 | { |
1961 | u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; |
1962 | u32 tc_num = hclge_get_tc_num(hdev); |
1963 | u32 shared_buf, aligned_mps; |
1964 | u32 rx_priv; |
1965 | int i; |
1966 | |
1967 | aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
1968 | |
1969 | if (hnae3_dev_dcb_supported(hdev)) |
1970 | shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + |
1971 | hdev->dv_buf_size; |
1972 | else |
1973 | shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF |
1974 | + hdev->dv_buf_size; |
1975 | |
1976 | shared_buf_tc = tc_num * aligned_mps + aligned_mps; |
1977 | shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), |
1978 | HCLGE_BUF_SIZE_UNIT); |
1979 | |
1980 | rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); |
1981 | if (rx_all < rx_priv + shared_std) |
1982 | return false; |
1983 | |
1984 | shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); |
1985 | buf_alloc->s_buf.buf_size = shared_buf; |
1986 | if (hnae3_dev_dcb_supported(hdev)) { |
1987 | buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; |
1988 | buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high |
1989 | - roundup(aligned_mps / HCLGE_BUF_DIV_BY, |
1990 | HCLGE_BUF_SIZE_UNIT); |
1991 | } else { |
1992 | buf_alloc->s_buf.self.high = aligned_mps + |
1993 | HCLGE_NON_DCB_ADDITIONAL_BUF; |
1994 | buf_alloc->s_buf.self.low = aligned_mps; |
1995 | } |
1996 | |
1997 | if (hnae3_dev_dcb_supported(hdev)) { |
1998 | hi_thrd = shared_buf - hdev->dv_buf_size; |
1999 | |
2000 | if (tc_num <= NEED_RESERVE_TC_NUM) |
2001 | hi_thrd = hi_thrd * BUF_RESERVE_PERCENT |
2002 | / BUF_MAX_PERCENT; |
2003 | |
2004 | if (tc_num) |
2005 | hi_thrd = hi_thrd / tc_num; |
2006 | |
2007 | hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); |
2008 | hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); |
2009 | lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; |
2010 | } else { |
2011 | hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; |
2012 | lo_thrd = aligned_mps; |
2013 | } |
2014 | |
2015 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2016 | buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; |
2017 | buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; |
2018 | } |
2019 | |
2020 | return true; |
2021 | } |
2022 | |
2023 | static int hclge_tx_buffer_calc(struct hclge_dev *hdev, |
2024 | struct hclge_pkt_buf_alloc *buf_alloc) |
2025 | { |
2026 | u32 i, total_size; |
2027 | |
2028 | total_size = hdev->pkt_buf_size; |
2029 | |
2030 | /* alloc tx buffer for all enabled tc */ |
2031 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2032 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2033 | |
2034 | if (hdev->hw_tc_map & BIT(i)) { |
2035 | if (total_size < hdev->tx_buf_size) |
2036 | return -ENOMEM; |
2037 | |
2038 | priv->tx_buf_size = hdev->tx_buf_size; |
2039 | } else { |
2040 | priv->tx_buf_size = 0; |
2041 | } |
2042 | |
2043 | total_size -= priv->tx_buf_size; |
2044 | } |
2045 | |
2046 | return 0; |
2047 | } |
2048 | |
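/* Lay out rx private buffers for all enabled TCs; @max selects generous
 * (true) or minimal (false) waterlines for PFC-enabled TCs. The result is
 * validated against the remaining rx buffer via hclge_is_rx_buf_ok().
 */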
2049 | static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, |
2050 | struct hclge_pkt_buf_alloc *buf_alloc) |
2051 | { |
2052 | u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); |
2053 | u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); |
2054 | unsigned int i; |
2055 | |
2056 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2057 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2058 | |
2059 | priv->enable = 0; |
2060 | priv->wl.low = 0; |
2061 | priv->wl.high = 0; |
2062 | priv->buf_size = 0; |
2063 | |
2064 | if (!(hdev->hw_tc_map & BIT(i))) |
2065 | continue; |
2066 | |
2067 | priv->enable = 1; |
2068 | |
2069 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { |
2070 | priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; |
2071 | priv->wl.high = roundup(priv->wl.low + aligned_mps, |
2072 | HCLGE_BUF_SIZE_UNIT); |
2073 | } else { |
2074 | priv->wl.low = 0; |
2075 | priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : |
2076 | aligned_mps; |
2077 | } |
2078 | |
2079 | priv->buf_size = priv->wl.high + hdev->dv_buf_size; |
2080 | } |
2081 | |
2082 | return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); |
2083 | } |
2084 | |
2085 | static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, |
2086 | struct hclge_pkt_buf_alloc *buf_alloc) |
2087 | { |
2088 | u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); |
2089 | int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); |
2090 | int i; |
2091 | |
	/* clear private buffers from the last TC first */
2093 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { |
2094 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2095 | unsigned int mask = BIT((unsigned int)i); |
2096 | |
2097 | if (hdev->hw_tc_map & mask && |
2098 | !(hdev->tm_info.hw_pfc_map & mask)) { |
2099 | /* Clear the no pfc TC private buffer */ |
2100 | priv->wl.low = 0; |
2101 | priv->wl.high = 0; |
2102 | priv->buf_size = 0; |
2103 | priv->enable = 0; |
2104 | no_pfc_priv_num--; |
2105 | } |
2106 | |
2107 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
2108 | no_pfc_priv_num == 0) |
2109 | break; |
2110 | } |
2111 | |
2112 | return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); |
2113 | } |
2114 | |
2115 | static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, |
2116 | struct hclge_pkt_buf_alloc *buf_alloc) |
2117 | { |
2118 | u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); |
2119 | int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); |
2120 | int i; |
2121 | |
	/* clear private buffers from the last TC first */
2123 | for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { |
2124 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2125 | unsigned int mask = BIT((unsigned int)i); |
2126 | |
2127 | if (hdev->hw_tc_map & mask && |
2128 | hdev->tm_info.hw_pfc_map & mask) { |
2129 | /* Reduce the number of pfc TC with private buffer */ |
2130 | priv->wl.low = 0; |
2131 | priv->enable = 0; |
2132 | priv->wl.high = 0; |
2133 | priv->buf_size = 0; |
2134 | pfc_priv_num--; |
2135 | } |
2136 | |
2137 | if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || |
2138 | pfc_priv_num == 0) |
2139 | break; |
2140 | } |
2141 | |
2142 | return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); |
2143 | } |
2144 | |
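/* Try to divide the whole rx buffer evenly among the enabled TCs as
 * private buffers, leaving no shared buffer. This only succeeds when
 * each TC's share stays above the minimum private buffer size.
 */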
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2147 | { |
2148 | #define COMPENSATE_BUFFER 0x3C00 |
2149 | #define COMPENSATE_HALF_MPS_NUM 5 |
2150 | #define PRIV_WL_GAP 0x1800 |
2151 | |
2152 | u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); |
2153 | u32 tc_num = hclge_get_tc_num(hdev); |
2154 | u32 half_mps = hdev->mps >> 1; |
2155 | u32 min_rx_priv; |
2156 | unsigned int i; |
2157 | |
2158 | if (tc_num) |
2159 | rx_priv = rx_priv / tc_num; |
2160 | |
2161 | if (tc_num <= NEED_RESERVE_TC_NUM) |
2162 | rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; |
2163 | |
2164 | min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + |
2165 | COMPENSATE_HALF_MPS_NUM * half_mps; |
2166 | min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); |
2167 | rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); |
2168 | if (rx_priv < min_rx_priv) |
2169 | return false; |
2170 | |
2171 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2172 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2173 | |
2174 | priv->enable = 0; |
2175 | priv->wl.low = 0; |
2176 | priv->wl.high = 0; |
2177 | priv->buf_size = 0; |
2178 | |
2179 | if (!(hdev->hw_tc_map & BIT(i))) |
2180 | continue; |
2181 | |
2182 | priv->enable = 1; |
2183 | priv->buf_size = rx_priv; |
2184 | priv->wl.high = rx_priv - hdev->dv_buf_size; |
2185 | priv->wl.low = priv->wl.high - PRIV_WL_GAP; |
2186 | } |
2187 | |
2188 | buf_alloc->s_buf.buf_size = 0; |
2189 | |
2190 | return true; |
2191 | } |
2192 | |
2193 | /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs |
2194 | * @hdev: pointer to struct hclge_dev |
2195 | * @buf_alloc: pointer to buffer calculation data |
 * @return: 0: calculation successful, negative: fail
2197 | */ |
2198 | static int hclge_rx_buffer_calc(struct hclge_dev *hdev, |
2199 | struct hclge_pkt_buf_alloc *buf_alloc) |
2200 | { |
2201 | /* When DCB is not supported, rx private buffer is not allocated. */ |
2202 | if (!hnae3_dev_dcb_supported(hdev)) { |
2203 | u32 rx_all = hdev->pkt_buf_size; |
2204 | |
2205 | rx_all -= hclge_get_tx_buff_alloced(buf_alloc); |
2206 | if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) |
2207 | return -ENOMEM; |
2208 | |
2209 | return 0; |
2210 | } |
2211 | |
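	/* Fall back through progressively less generous layouts: private
	 * buffers only, then a shared buffer with max and then reduced
	 * waterlines, and finally dropping private buffers for non-PFC
	 * and PFC TCs in turn.
	 */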
2212 | if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) |
2213 | return 0; |
2214 | |
	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2216 | return 0; |
2217 | |
2218 | /* try to decrease the buffer size */ |
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2220 | return 0; |
2221 | |
2222 | if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) |
2223 | return 0; |
2224 | |
2225 | if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) |
2226 | return 0; |
2227 | |
2228 | return -ENOMEM; |
2229 | } |
2230 | |
2231 | static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, |
2232 | struct hclge_pkt_buf_alloc *buf_alloc) |
2233 | { |
2234 | struct hclge_rx_priv_buff_cmd *req; |
2235 | struct hclge_desc desc; |
2236 | int ret; |
2237 | int i; |
2238 | |
2239 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); |
2240 | req = (struct hclge_rx_priv_buff_cmd *)desc.data; |
2241 | |
2242 | /* Alloc private buffer TCs */ |
2243 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2244 | struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; |
2245 | |
2246 | req->buf_num[i] = |
2247 | cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); |
2248 | req->buf_num[i] |= |
2249 | cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); |
2250 | } |
2251 | |
2252 | req->shared_buf = |
2253 | cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | |
2254 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); |
2255 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
2260 | |
2261 | return ret; |
2262 | } |
2263 | |
2264 | static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, |
2265 | struct hclge_pkt_buf_alloc *buf_alloc) |
2266 | { |
2267 | struct hclge_rx_priv_wl_buf *req; |
2268 | struct hclge_priv_buf *priv; |
2269 | struct hclge_desc desc[2]; |
2270 | int i, j; |
2271 | int ret; |
2272 | |
2273 | for (i = 0; i < 2; i++) { |
2274 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, |
2275 | false); |
2276 | req = (struct hclge_rx_priv_wl_buf *)desc[i].data; |
2277 | |
		/* The first descriptor sets the NEXT bit to 1 */
2279 | if (i == 0) |
2280 | desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
2281 | else |
2282 | desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
2283 | |
2284 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { |
2285 | u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; |
2286 | |
2287 | priv = &buf_alloc->priv_buf[idx]; |
2288 | req->tc_wl[j].high = |
2289 | cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); |
2290 | req->tc_wl[j].high |= |
2291 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2292 | req->tc_wl[j].low = |
2293 | cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); |
2294 | req->tc_wl[j].low |= |
2295 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2296 | } |
2297 | } |
2298 | |
	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
2305 | return ret; |
2306 | } |
2307 | |
2308 | static int hclge_common_thrd_config(struct hclge_dev *hdev, |
2309 | struct hclge_pkt_buf_alloc *buf_alloc) |
2310 | { |
2311 | struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; |
2312 | struct hclge_rx_com_thrd *req; |
2313 | struct hclge_desc desc[2]; |
2314 | struct hclge_tc_thrd *tc; |
2315 | int i, j; |
2316 | int ret; |
2317 | |
2318 | for (i = 0; i < 2; i++) { |
2319 | hclge_cmd_setup_basic_desc(&desc[i], |
2320 | HCLGE_OPC_RX_COM_THRD_ALLOC, false); |
2321 | req = (struct hclge_rx_com_thrd *)&desc[i].data; |
2322 | |
		/* The first descriptor sets the NEXT bit to 1 */
2324 | if (i == 0) |
2325 | desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
2326 | else |
2327 | desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
2328 | |
2329 | for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { |
2330 | tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; |
2331 | |
2332 | req->com_thrd[j].high = |
2333 | cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); |
2334 | req->com_thrd[j].high |= |
2335 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2336 | req->com_thrd[j].low = |
2337 | cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); |
2338 | req->com_thrd[j].low |= |
2339 | cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2340 | } |
2341 | } |
2342 | |
2343 | /* Send 2 descriptors at one time */ |
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
2348 | return ret; |
2349 | } |
2350 | |
2351 | static int hclge_common_wl_config(struct hclge_dev *hdev, |
2352 | struct hclge_pkt_buf_alloc *buf_alloc) |
2353 | { |
2354 | struct hclge_shared_buf *buf = &buf_alloc->s_buf; |
2355 | struct hclge_rx_com_wl *req; |
2356 | struct hclge_desc desc; |
2357 | int ret; |
2358 | |
2359 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); |
2360 | |
2361 | req = (struct hclge_rx_com_wl *)desc.data; |
2362 | req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); |
2363 | req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2364 | |
2365 | req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); |
2366 | req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); |
2367 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
2372 | |
2373 | return ret; |
2374 | } |
2375 | |
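/* hclge_buffer_alloc: calculate and configure the whole packet buffer
 * layout: tx buffers first, then rx private buffers, then (when DCB is
 * supported) the per-TC waterlines and thresholds, and finally the
 * common waterline.
 */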
2376 | int hclge_buffer_alloc(struct hclge_dev *hdev) |
2377 | { |
2378 | struct hclge_pkt_buf_alloc *pkt_buf; |
2379 | int ret; |
2380 | |
	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2382 | if (!pkt_buf) |
2383 | return -ENOMEM; |
2384 | |
	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
2389 | goto out; |
2390 | } |
2391 | |
	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
2396 | goto out; |
2397 | } |
2398 | |
	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
2403 | ret); |
2404 | goto out; |
2405 | } |
2406 | |
	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2410 | ret); |
2411 | goto out; |
2412 | } |
2413 | |
2414 | if (hnae3_dev_dcb_supported(hdev)) { |
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2416 | if (ret) { |
2417 | dev_err(&hdev->pdev->dev, |
2418 | "could not configure rx private waterline %d\n" , |
2419 | ret); |
2420 | goto out; |
2421 | } |
2422 | |
		ret = hclge_common_thrd_config(hdev, pkt_buf);
2424 | if (ret) { |
2425 | dev_err(&hdev->pdev->dev, |
2426 | "could not configure common threshold %d\n" , |
2427 | ret); |
2428 | goto out; |
2429 | } |
2430 | } |
2431 | |
	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);
2436 | |
2437 | out: |
	kfree(pkt_buf);
2439 | return ret; |
2440 | } |
2441 | |
2442 | static int hclge_init_roce_base_info(struct hclge_vport *vport) |
2443 | { |
2444 | struct hnae3_handle *roce = &vport->roce; |
2445 | struct hnae3_handle *nic = &vport->nic; |
2446 | struct hclge_dev *hdev = vport->back; |
2447 | |
2448 | roce->rinfo.num_vectors = vport->back->num_roce_msi; |
2449 | |
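	/* RoCE vectors are laid out after the NIC vectors in the MSI-X
	 * table, so the RoCE base is the number of NIC vectors.
	 */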
2450 | if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) |
2451 | return -EINVAL; |
2452 | |
2453 | roce->rinfo.base_vector = hdev->num_nic_msi; |
2454 | |
2455 | roce->rinfo.netdev = nic->kinfo.netdev; |
2456 | roce->rinfo.roce_io_base = hdev->hw.hw.io_base; |
2457 | roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; |
2458 | |
2459 | roce->pdev = nic->pdev; |
2460 | roce->ae_algo = nic->ae_algo; |
2461 | roce->numa_node_mask = nic->numa_node_mask; |
2462 | |
2463 | return 0; |
2464 | } |
2465 | |
2466 | static int hclge_init_msi(struct hclge_dev *hdev) |
2467 | { |
2468 | struct pci_dev *pdev = hdev->pdev; |
2469 | int vectors; |
2470 | int i; |
2471 | |
	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2475 | if (vectors < 0) { |
2476 | dev_err(&pdev->dev, |
2477 | "failed(%d) to allocate MSI/MSI-X vectors\n" , |
2478 | vectors); |
2479 | return vectors; |
2480 | } |
2481 | if (vectors < hdev->num_msi) |
2482 | dev_warn(&hdev->pdev->dev, |
2483 | "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n" , |
2484 | hdev->num_msi, vectors); |
2485 | |
2486 | hdev->num_msi = vectors; |
2487 | hdev->num_msi_left = vectors; |
2488 | |
2489 | hdev->vector_status = devm_kcalloc(dev: &pdev->dev, n: hdev->num_msi, |
2490 | size: sizeof(u16), GFP_KERNEL); |
2491 | if (!hdev->vector_status) { |
2492 | pci_free_irq_vectors(dev: pdev); |
2493 | return -ENOMEM; |
2494 | } |
2495 | |
2496 | for (i = 0; i < hdev->num_msi; i++) |
2497 | hdev->vector_status[i] = HCLGE_INVALID_VPORT; |
2498 | |
2499 | hdev->vector_irq = devm_kcalloc(dev: &pdev->dev, n: hdev->num_msi, |
2500 | size: sizeof(int), GFP_KERNEL); |
2501 | if (!hdev->vector_irq) { |
2502 | pci_free_irq_vectors(dev: pdev); |
2503 | return -ENOMEM; |
2504 | } |
2505 | |
2506 | return 0; |
2507 | } |
2508 | |
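/* Half duplex is only meaningful at 10M/100M; force full duplex for all
 * other speeds.
 */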
2509 | static u8 hclge_check_speed_dup(u8 duplex, int speed) |
2510 | { |
2511 | if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) |
2512 | duplex = HCLGE_MAC_FULL; |
2513 | |
2514 | return duplex; |
2515 | } |
2516 | |
2517 | static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { |
2518 | {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, |
2519 | {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, |
2520 | {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, |
2521 | {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, |
2522 | {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, |
2523 | {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, |
2524 | {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, |
2525 | {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, |
2526 | {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, |
2527 | }; |
2528 | |
2529 | static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) |
2530 | { |
2531 | u16 i; |
2532 | |
2533 | for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { |
2534 | if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { |
2535 | *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; |
2536 | return 0; |
2537 | } |
2538 | } |
2539 | |
2540 | return -EINVAL; |
2541 | } |
2542 | |
2543 | static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, |
2544 | u8 duplex, u8 lane_num) |
2545 | { |
2546 | struct hclge_config_mac_speed_dup_cmd *req; |
2547 | struct hclge_desc desc; |
2548 | u32 speed_fw; |
2549 | int ret; |
2550 | |
2551 | req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; |
2552 | |
2553 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); |
2554 | |
2555 | if (duplex) |
2556 | hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); |
2557 | |
	ret = hclge_convert_to_fw_speed(speed, &speed_fw);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2561 | return ret; |
2562 | } |
2563 | |
2564 | hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, |
2565 | speed_fw); |
2566 | hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, |
2567 | 1); |
2568 | req->lane_num = lane_num; |
2569 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
2574 | return ret; |
2575 | } |
2576 | |
2577 | return 0; |
2578 | } |
2579 | |
2580 | int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) |
2581 | { |
2582 | struct hclge_mac *mac = &hdev->hw.mac; |
2583 | int ret; |
2584 | |
2585 | duplex = hclge_check_speed_dup(duplex, speed); |
2586 | if (!mac->support_autoneg && mac->speed == speed && |
2587 | mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) |
2588 | return 0; |
2589 | |
2590 | ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); |
2591 | if (ret) |
2592 | return ret; |
2593 | |
2594 | hdev->hw.mac.speed = speed; |
2595 | hdev->hw.mac.duplex = duplex; |
	/* a lane_num of 0 means "keep the current lane number" */
	if (lane_num)
		hdev->hw.mac.lane_num = lane_num;
2598 | |
2599 | return 0; |
2600 | } |
2601 | |
2602 | static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, |
2603 | u8 duplex, u8 lane_num) |
2604 | { |
2605 | struct hclge_vport *vport = hclge_get_vport(handle); |
2606 | struct hclge_dev *hdev = vport->back; |
2607 | |
2608 | return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); |
2609 | } |
2610 | |
2611 | static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) |
2612 | { |
2613 | struct hclge_config_auto_neg_cmd *req; |
2614 | struct hclge_desc desc; |
2615 | u32 flag = 0; |
2616 | int ret; |
2617 | |
2618 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); |
2619 | |
2620 | req = (struct hclge_config_auto_neg_cmd *)desc.data; |
2621 | if (enable) |
2622 | hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); |
2623 | req->cfg_an_cmd_flag = cpu_to_le32(flag); |
2624 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2628 | ret); |
2629 | |
2630 | return ret; |
2631 | } |
2632 | |
2633 | static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) |
2634 | { |
2635 | struct hclge_vport *vport = hclge_get_vport(handle); |
2636 | struct hclge_dev *hdev = vport->back; |
2637 | |
2638 | if (!hdev->hw.mac.support_autoneg) { |
2639 | if (enable) { |
2640 | dev_err(&hdev->pdev->dev, |
2641 | "autoneg is not supported by current port\n" ); |
2642 | return -EOPNOTSUPP; |
2643 | } else { |
2644 | return 0; |
2645 | } |
2646 | } |
2647 | |
2648 | return hclge_set_autoneg_en(hdev, enable); |
2649 | } |
2650 | |
2651 | static int hclge_get_autoneg(struct hnae3_handle *handle) |
2652 | { |
2653 | struct hclge_vport *vport = hclge_get_vport(handle); |
2654 | struct hclge_dev *hdev = vport->back; |
2655 | struct phy_device *phydev = hdev->hw.mac.phydev; |
2656 | |
2657 | if (phydev) |
2658 | return phydev->autoneg; |
2659 | |
2660 | return hdev->hw.mac.autoneg; |
2661 | } |
2662 | |
2663 | static int hclge_restart_autoneg(struct hnae3_handle *handle) |
2664 | { |
2665 | struct hclge_vport *vport = hclge_get_vport(handle); |
2666 | struct hclge_dev *hdev = vport->back; |
2667 | int ret; |
2668 | |
2669 | dev_dbg(&hdev->pdev->dev, "restart autoneg\n" ); |
2670 | |
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2675 | } |
2676 | |
2677 | static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) |
2678 | { |
2679 | struct hclge_vport *vport = hclge_get_vport(handle); |
2680 | struct hclge_dev *hdev = vport->back; |
2681 | |
2682 | if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) |
		return hclge_set_autoneg_en(hdev, !halt);
2684 | |
2685 | return 0; |
2686 | } |
2687 | |
2688 | static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, |
2689 | struct hclge_desc *desc, u32 desc_len) |
2690 | { |
2691 | u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; |
2692 | u32 desc_index = 0; |
2693 | u32 data_index = 0; |
2694 | u32 i; |
2695 | |
2696 | for (i = 0; i < lane_size; i++) { |
2697 | if (data_index >= HCLGE_DESC_DATA_LEN) { |
2698 | desc_index++; |
2699 | data_index = 0; |
2700 | } |
2701 | |
2702 | if (desc_index >= desc_len) |
2703 | return; |
2704 | |
2705 | hdev->fec_stats.per_lanes[i] += |
2706 | le32_to_cpu(desc[desc_index].data[data_index]); |
2707 | data_index++; |
2708 | } |
2709 | } |
2710 | |
2711 | static void hclge_parse_fec_stats(struct hclge_dev *hdev, |
2712 | struct hclge_desc *desc, u32 desc_len) |
2713 | { |
2714 | struct hclge_query_fec_stats_cmd *req; |
2715 | |
2716 | req = (struct hclge_query_fec_stats_cmd *)desc[0].data; |
2717 | |
2718 | hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; |
2719 | hdev->fec_stats.rs_corr_blocks += |
2720 | le32_to_cpu(req->rs_fec_corr_blocks); |
2721 | hdev->fec_stats.rs_uncorr_blocks += |
2722 | le32_to_cpu(req->rs_fec_uncorr_blocks); |
2723 | hdev->fec_stats.rs_error_blocks += |
2724 | le32_to_cpu(req->rs_fec_error_blocks); |
2725 | hdev->fec_stats.base_r_corr_blocks += |
2726 | le32_to_cpu(req->base_r_fec_corr_blocks); |
2727 | hdev->fec_stats.base_r_uncorr_blocks += |
2728 | le32_to_cpu(req->base_r_fec_uncorr_blocks); |
2729 | |
	hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2731 | } |
2732 | |
2733 | static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) |
2734 | { |
2735 | struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; |
2736 | int ret; |
2737 | u32 i; |
2738 | |
2739 | for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { |
2740 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, |
2741 | true); |
2742 | if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) |
2743 | desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
2744 | } |
2745 | |
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2747 | if (ret) |
2748 | return ret; |
2749 | |
2750 | hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); |
2751 | |
2752 | return 0; |
2753 | } |
2754 | |
2755 | static void hclge_update_fec_stats(struct hclge_dev *hdev) |
2756 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2758 | int ret; |
2759 | |
2760 | if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || |
	    test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2762 | return; |
2763 | |
2764 | ret = hclge_update_fec_stats_hw(hdev); |
2765 | if (ret) |
2766 | dev_err(&hdev->pdev->dev, |
2767 | "failed to update fec stats, ret = %d\n" , ret); |
2768 | |
2769 | clear_bit(nr: HCLGE_STATE_FEC_STATS_UPDATING, addr: &hdev->state); |
2770 | } |
2771 | |
2772 | static void hclge_get_fec_stats_total(struct hclge_dev *hdev, |
2773 | struct ethtool_fec_stats *fec_stats) |
2774 | { |
2775 | fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; |
2776 | fec_stats->uncorrectable_blocks.total = |
2777 | hdev->fec_stats.rs_uncorr_blocks; |
2778 | } |
2779 | |
2780 | static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, |
2781 | struct ethtool_fec_stats *fec_stats) |
2782 | { |
2783 | u32 i; |
2784 | |
2785 | if (hdev->fec_stats.base_r_lane_num == 0 || |
2786 | hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { |
2787 | dev_err(&hdev->pdev->dev, |
2788 | "fec stats lane number(%llu) is invalid\n" , |
2789 | hdev->fec_stats.base_r_lane_num); |
2790 | return; |
2791 | } |
2792 | |
2793 | for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { |
2794 | fec_stats->corrected_blocks.lanes[i] = |
2795 | hdev->fec_stats.base_r_corr_per_lanes[i]; |
2796 | fec_stats->uncorrectable_blocks.lanes[i] = |
2797 | hdev->fec_stats.base_r_uncorr_per_lanes[i]; |
2798 | } |
2799 | } |
2800 | |
2801 | static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, |
2802 | struct ethtool_fec_stats *fec_stats) |
2803 | { |
2804 | u32 fec_mode = hdev->hw.mac.fec_mode; |
2805 | |
2806 | switch (fec_mode) { |
2807 | case BIT(HNAE3_FEC_RS): |
2808 | case BIT(HNAE3_FEC_LLRS): |
2809 | hclge_get_fec_stats_total(hdev, fec_stats); |
2810 | break; |
2811 | case BIT(HNAE3_FEC_BASER): |
2812 | hclge_get_fec_stats_lanes(hdev, fec_stats); |
2813 | break; |
2814 | default: |
2815 | dev_err(&hdev->pdev->dev, |
2816 | "fec stats is not supported by current fec mode(0x%x)\n" , |
2817 | fec_mode); |
2818 | break; |
2819 | } |
2820 | } |
2821 | |
2822 | static void hclge_get_fec_stats(struct hnae3_handle *handle, |
2823 | struct ethtool_fec_stats *fec_stats) |
2824 | { |
2825 | struct hclge_vport *vport = hclge_get_vport(handle); |
2826 | struct hclge_dev *hdev = vport->back; |
2827 | u32 fec_mode = hdev->hw.mac.fec_mode; |
2828 | |
2829 | if (fec_mode == BIT(HNAE3_FEC_NONE) || |
2830 | fec_mode == BIT(HNAE3_FEC_AUTO) || |
2831 | fec_mode == BIT(HNAE3_FEC_USER_DEF)) |
2832 | return; |
2833 | |
2834 | hclge_update_fec_stats(hdev); |
2835 | |
2836 | hclge_comm_get_fec_stats(hdev, fec_stats); |
2837 | } |
2838 | |
2839 | static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) |
2840 | { |
2841 | struct hclge_config_fec_cmd *req; |
2842 | struct hclge_desc desc; |
2843 | int ret; |
2844 | |
2845 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); |
2846 | |
2847 | req = (struct hclge_config_fec_cmd *)desc.data; |
2848 | if (fec_mode & BIT(HNAE3_FEC_AUTO)) |
2849 | hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); |
2850 | if (fec_mode & BIT(HNAE3_FEC_RS)) |
2851 | hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, |
2852 | HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); |
2853 | if (fec_mode & BIT(HNAE3_FEC_LLRS)) |
2854 | hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, |
2855 | HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); |
2856 | if (fec_mode & BIT(HNAE3_FEC_BASER)) |
2857 | hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, |
2858 | HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); |
2859 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2863 | |
2864 | return ret; |
2865 | } |
2866 | |
2867 | static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) |
2868 | { |
2869 | struct hclge_vport *vport = hclge_get_vport(handle); |
2870 | struct hclge_dev *hdev = vport->back; |
2871 | struct hclge_mac *mac = &hdev->hw.mac; |
2872 | int ret; |
2873 | |
2874 | if (fec_mode && !(mac->fec_ability & fec_mode)) { |
2875 | dev_err(&hdev->pdev->dev, "unsupported fec mode\n" ); |
2876 | return -EINVAL; |
2877 | } |
2878 | |
2879 | ret = hclge_set_fec_hw(hdev, fec_mode); |
2880 | if (ret) |
2881 | return ret; |
2882 | |
2883 | mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); |
2884 | return 0; |
2885 | } |
2886 | |
2887 | static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, |
2888 | u8 *fec_mode) |
2889 | { |
2890 | struct hclge_vport *vport = hclge_get_vport(handle); |
2891 | struct hclge_dev *hdev = vport->back; |
2892 | struct hclge_mac *mac = &hdev->hw.mac; |
2893 | |
2894 | if (fec_ability) |
2895 | *fec_ability = mac->fec_ability; |
2896 | if (fec_mode) |
2897 | *fec_mode = mac->fec_mode; |
2898 | } |
2899 | |
2900 | static int hclge_mac_init(struct hclge_dev *hdev) |
2901 | { |
2902 | struct hclge_mac *mac = &hdev->hw.mac; |
2903 | int ret; |
2904 | |
2905 | hdev->support_sfp_query = true; |
2906 | |
2907 | if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
2908 | hdev->hw.mac.duplex = HCLGE_MAC_FULL; |
2909 | |
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
2912 | if (ret) |
2913 | return ret; |
2914 | |
2915 | if (hdev->hw.mac.support_autoneg) { |
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2917 | if (ret) |
2918 | return ret; |
2919 | } |
2920 | |
2921 | mac->link = 0; |
2922 | |
2923 | if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { |
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2925 | if (ret) |
2926 | return ret; |
2927 | } |
2928 | |
	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2932 | return ret; |
2933 | } |
2934 | |
2935 | ret = hclge_set_default_loopback(hdev); |
2936 | if (ret) |
2937 | return ret; |
2938 | |
2939 | ret = hclge_buffer_alloc(hdev); |
2940 | if (ret) |
2941 | dev_err(&hdev->pdev->dev, |
2942 | "allocate buffer fail, ret=%d\n" , ret); |
2943 | |
2944 | return ret; |
2945 | } |
2946 | |
2947 | static void hclge_mbx_task_schedule(struct hclge_dev *hdev) |
2948 | { |
2949 | if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && |
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
		hdev->last_mbx_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2953 | } |
2954 | } |
2955 | |
2956 | static void hclge_reset_task_schedule(struct hclge_dev *hdev) |
2957 | { |
2958 | if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && |
2959 | test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && |
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		hdev->last_rst_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2963 | } |
2964 | } |
2965 | |
2966 | static void hclge_errhand_task_schedule(struct hclge_dev *hdev) |
2967 | { |
2968 | if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && |
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2971 | } |
2972 | |
2973 | void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) |
2974 | { |
2975 | if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && |
2976 | !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) |
		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2978 | } |
2979 | |
2980 | static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) |
2981 | { |
2982 | struct hclge_link_status_cmd *req; |
2983 | struct hclge_desc desc; |
2984 | int ret; |
2985 | |
2986 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2990 | ret); |
2991 | return ret; |
2992 | } |
2993 | |
2994 | req = (struct hclge_link_status_cmd *)desc.data; |
2995 | *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? |
2996 | HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; |
2997 | |
2998 | return 0; |
2999 | } |
3000 | |
3001 | static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) |
3002 | { |
3003 | struct phy_device *phydev = hdev->hw.mac.phydev; |
3004 | |
3005 | *link_status = HCLGE_LINK_STATUS_DOWN; |
3006 | |
3007 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) |
3008 | return 0; |
3009 | |
3010 | if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) |
3011 | return 0; |
3012 | |
3013 | return hclge_get_mac_link_status(hdev, link_status); |
3014 | } |
3015 | |
3016 | static void hclge_push_link_status(struct hclge_dev *hdev) |
3017 | { |
3018 | struct hclge_vport *vport; |
3019 | int ret; |
3020 | u16 i; |
3021 | |
3022 | for (i = 0; i < pci_num_vf(dev: hdev->pdev); i++) { |
3023 | vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; |
3024 | |
3025 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || |
3026 | vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) |
3027 | continue; |
3028 | |
3029 | ret = hclge_push_vf_link_status(vport); |
3030 | if (ret) { |
3031 | dev_err(&hdev->pdev->dev, |
3032 | "failed to push link status to vf%u, ret = %d\n" , |
3033 | i, ret); |
3034 | } |
3035 | } |
3036 | } |
3037 | |
3038 | static void hclge_update_link_status(struct hclge_dev *hdev) |
3039 | { |
3040 | struct hnae3_handle *rhandle = &hdev->vport[0].roce; |
3041 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
3042 | struct hnae3_client *rclient = hdev->roce_client; |
3043 | struct hnae3_client *client = hdev->nic_client; |
3044 | int state; |
3045 | int ret; |
3046 | |
3047 | if (!client) |
3048 | return; |
3049 | |
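	/* HCLGE_STATE_LINK_UPDATING serializes link status updates; bail
	 * out if another update is already in flight.
	 */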
	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3051 | return; |
3052 | |
	ret = hclge_get_mac_phy_link(hdev, &state);
	if (ret) {
		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3056 | return; |
3057 | } |
3058 | |
3059 | if (state != hdev->hw.mac.link) { |
3060 | hdev->hw.mac.link = state; |
3061 | if (state == HCLGE_LINK_STATUS_UP) |
3062 | hclge_update_port_info(hdev); |
3063 | |
3064 | client->ops->link_status_change(handle, state); |
		hclge_config_mac_tnl_int(hdev, state);
3066 | if (rclient && rclient->ops->link_status_change) |
3067 | rclient->ops->link_status_change(rhandle, state); |
3068 | |
3069 | hclge_push_link_status(hdev); |
3070 | } |
3071 | |
	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3073 | } |
3074 | |
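/* Translate the active module type (LR/SR/AOC/CR/KR) into the set of
 * link modes advertised for the current speed; unknown module types
 * leave the advertising mask untouched.
 */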
3075 | static void hclge_update_speed_advertising(struct hclge_mac *mac) |
3076 | { |
3077 | u32 speed_ability; |
3078 | |
	if (hclge_get_speed_bit(mac->speed, &speed_ability))
3080 | return; |
3081 | |
3082 | switch (mac->module_type) { |
3083 | case HNAE3_MODULE_TYPE_FIBRE_LR: |
		hclge_convert_setting_lr(speed_ability, mac->advertising);
3085 | break; |
3086 | case HNAE3_MODULE_TYPE_FIBRE_SR: |
3087 | case HNAE3_MODULE_TYPE_AOC: |
		hclge_convert_setting_sr(speed_ability, mac->advertising);
3089 | break; |
3090 | case HNAE3_MODULE_TYPE_CR: |
		hclge_convert_setting_cr(speed_ability, mac->advertising);
3092 | break; |
3093 | case HNAE3_MODULE_TYPE_KR: |
		hclge_convert_setting_kr(speed_ability, mac->advertising);
3095 | break; |
3096 | default: |
3097 | break; |
3098 | } |
3099 | } |
3100 | |
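/* Advertise exactly one FEC mode, preferring RS over LLRS over BaseR,
 * and fall back to FEC_NONE when no FEC mode is active.
 */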
3101 | static void hclge_update_fec_advertising(struct hclge_mac *mac) |
3102 | { |
	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->advertising);
	else
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->advertising);
3115 | } |
3116 | |
3117 | static void hclge_update_pause_advertising(struct hclge_dev *hdev) |
3118 | { |
3119 | struct hclge_mac *mac = &hdev->hw.mac; |
3120 | bool rx_en, tx_en; |
3121 | |
3122 | switch (hdev->fc_mode_last_time) { |
3123 | case HCLGE_FC_RX_PAUSE: |
3124 | rx_en = true; |
3125 | tx_en = false; |
3126 | break; |
3127 | case HCLGE_FC_TX_PAUSE: |
3128 | rx_en = false; |
3129 | tx_en = true; |
3130 | break; |
3131 | case HCLGE_FC_FULL: |
3132 | rx_en = true; |
3133 | tx_en = true; |
3134 | break; |
3135 | default: |
3136 | rx_en = false; |
3137 | tx_en = false; |
3138 | break; |
3139 | } |
3140 | |
	linkmode_set_pause(mac->advertising, tx_en, rx_en);
3142 | } |
3143 | |
3144 | static void hclge_update_advertising(struct hclge_dev *hdev) |
3145 | { |
3146 | struct hclge_mac *mac = &hdev->hw.mac; |
3147 | |
	linkmode_zero(mac->advertising);
3149 | hclge_update_speed_advertising(mac); |
3150 | hclge_update_fec_advertising(mac); |
3151 | hclge_update_pause_advertising(hdev); |
3152 | } |
3153 | |
3154 | static void hclge_update_port_capability(struct hclge_dev *hdev, |
3155 | struct hclge_mac *mac) |
3156 | { |
3157 | if (hnae3_dev_fec_supported(hdev)) |
3158 | hclge_convert_setting_fec(mac); |
3159 | |
	/* firmware cannot identify the backplane type, so the media type
	 * read from the configuration can help deal with it
	 */
3163 | if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && |
3164 | mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) |
3165 | mac->module_type = HNAE3_MODULE_TYPE_KR; |
3166 | else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) |
3167 | mac->module_type = HNAE3_MODULE_TYPE_TP; |
3168 | |
3169 | if (mac->support_autoneg) { |
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
3175 | hclge_update_advertising(hdev); |
3176 | } |
3177 | } |
3178 | |
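/* Query the SFP speed from the IMP firmware. Old firmware replies with
 * -EOPNOTSUPP, which the caller uses to stop issuing this query.
 */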
3179 | static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) |
3180 | { |
3181 | struct hclge_sfp_info_cmd *resp; |
3182 | struct hclge_desc desc; |
3183 | int ret; |
3184 | |
3185 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); |
3186 | resp = (struct hclge_sfp_info_cmd *)desc.data; |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3194 | return ret; |
3195 | } |
3196 | |
3197 | *speed = le32_to_cpu(resp->speed); |
3198 | |
3199 | return 0; |
3200 | } |
3201 | |
3202 | static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) |
3203 | { |
3204 | struct hclge_sfp_info_cmd *resp; |
3205 | struct hclge_desc desc; |
3206 | int ret; |
3207 | |
3208 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); |
3209 | resp = (struct hclge_sfp_info_cmd *)desc.data; |
3210 | |
3211 | resp->query_type = QUERY_ACTIVE_SPEED; |
3212 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3220 | return ret; |
3221 | } |
3222 | |
	/* In some cases, the MAC speed queried from the IMP may be 0; it
	 * should not be written to mac->speed.
	 */
3226 | if (!le32_to_cpu(resp->speed)) |
3227 | return 0; |
3228 | |
3229 | mac->speed = le32_to_cpu(resp->speed); |
	/* if resp->speed_ability is 0, it means the firmware is an old
	 * version that does not report these params, so do not update them
	 */
3233 | if (resp->speed_ability) { |
3234 | mac->module_type = le32_to_cpu(resp->module_type); |
3235 | mac->speed_ability = le32_to_cpu(resp->speed_ability); |
3236 | mac->autoneg = resp->autoneg; |
3237 | mac->support_autoneg = resp->autoneg_ability; |
3238 | mac->speed_type = QUERY_ACTIVE_SPEED; |
3239 | mac->lane_num = resp->lane_num; |
3240 | if (!resp->active_fec) |
3241 | mac->fec_mode = 0; |
3242 | else |
3243 | mac->fec_mode = BIT(resp->active_fec); |
3244 | mac->fec_ability = resp->fec_ability; |
3245 | } else { |
3246 | mac->speed_type = QUERY_SFP_SPEED; |
3247 | } |
3248 | |
3249 | return 0; |
3250 | } |
3251 | |
3252 | static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, |
3253 | struct ethtool_link_ksettings *cmd) |
3254 | { |
3255 | struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; |
3256 | struct hclge_vport *vport = hclge_get_vport(handle); |
3257 | struct hclge_phy_link_ksetting_0_cmd *req0; |
3258 | struct hclge_phy_link_ksetting_1_cmd *req1; |
3259 | u32 supported, advertising, lp_advertising; |
3260 | struct hclge_dev *hdev = vport->back; |
3261 | int ret; |
3262 | |
3263 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, |
3264 | true); |
3265 | desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
3266 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, |
3267 | true); |
3268 | |
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
3273 | return ret; |
3274 | } |
3275 | |
3276 | req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; |
3277 | cmd->base.autoneg = req0->autoneg; |
3278 | cmd->base.speed = le32_to_cpu(req0->speed); |
3279 | cmd->base.duplex = req0->duplex; |
3280 | cmd->base.port = req0->port; |
3281 | cmd->base.transceiver = req0->transceiver; |
3282 | cmd->base.phy_address = req0->phy_address; |
3283 | cmd->base.eth_tp_mdix = req0->eth_tp_mdix; |
3284 | cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; |
3285 | supported = le32_to_cpu(req0->supported); |
3286 | advertising = le32_to_cpu(req0->advertising); |
3287 | lp_advertising = le32_to_cpu(req0->lp_advertising); |
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);
3294 | |
3295 | req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; |
3296 | cmd->base.master_slave_cfg = req1->master_slave_cfg; |
3297 | cmd->base.master_slave_state = req1->master_slave_state; |
3298 | |
3299 | return 0; |
3300 | } |
3301 | |
3302 | static int |
3303 | hclge_set_phy_link_ksettings(struct hnae3_handle *handle, |
3304 | const struct ethtool_link_ksettings *cmd) |
3305 | { |
3306 | struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; |
3307 | struct hclge_vport *vport = hclge_get_vport(handle); |
3308 | struct hclge_phy_link_ksetting_0_cmd *req0; |
3309 | struct hclge_phy_link_ksetting_1_cmd *req1; |
3310 | struct hclge_dev *hdev = vport->back; |
3311 | u32 advertising; |
3312 | int ret; |
3313 | |
3314 | if (cmd->base.autoneg == AUTONEG_DISABLE && |
3315 | ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || |
3316 | (cmd->base.duplex != DUPLEX_HALF && |
3317 | cmd->base.duplex != DUPLEX_FULL))) |
3318 | return -EINVAL; |
3319 | |
3320 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, |
3321 | false); |
3322 | desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
3323 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, |
3324 | false); |
3325 | |
3326 | req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; |
3327 | req0->autoneg = cmd->base.autoneg; |
3328 | req0->speed = cpu_to_le32(cmd->base.speed); |
3329 | req0->duplex = cmd->base.duplex; |
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
3332 | req0->advertising = cpu_to_le32(advertising); |
3333 | req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; |
3334 | |
3335 | req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; |
3336 | req1->master_slave_cfg = cmd->base.master_slave_cfg; |
3337 | |
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
3342 | return ret; |
3343 | } |
3344 | |
3345 | hdev->hw.mac.autoneg = cmd->base.autoneg; |
3346 | hdev->hw.mac.speed = cmd->base.speed; |
3347 | hdev->hw.mac.duplex = cmd->base.duplex; |
	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3349 | |
3350 | return 0; |
3351 | } |
3352 | |
3353 | static int hclge_update_tp_port_info(struct hclge_dev *hdev) |
3354 | { |
3355 | struct ethtool_link_ksettings cmd; |
3356 | int ret; |
3357 | |
3358 | if (!hnae3_dev_phy_imp_supported(hdev)) |
3359 | return 0; |
3360 | |
	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3362 | if (ret) |
3363 | return ret; |
3364 | |
3365 | hdev->hw.mac.autoneg = cmd.base.autoneg; |
3366 | hdev->hw.mac.speed = cmd.base.speed; |
3367 | hdev->hw.mac.duplex = cmd.base.duplex; |
	linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3369 | |
3370 | return 0; |
3371 | } |
3372 | |
3373 | static int hclge_tp_port_init(struct hclge_dev *hdev) |
3374 | { |
3375 | struct ethtool_link_ksettings cmd; |
3376 | |
3377 | if (!hnae3_dev_phy_imp_supported(hdev)) |
3378 | return 0; |
3379 | |
3380 | cmd.base.autoneg = hdev->hw.mac.autoneg; |
3381 | cmd.base.speed = hdev->hw.mac.speed; |
3382 | cmd.base.duplex = hdev->hw.mac.duplex; |
	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);

	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3386 | } |
3387 | |
3388 | static int hclge_update_port_info(struct hclge_dev *hdev) |
3389 | { |
3390 | struct hclge_mac *mac = &hdev->hw.mac; |
3391 | int speed; |
3392 | int ret; |
3393 | |
3394 | /* get the port info from SFP cmd if not copper port */ |
3395 | if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) |
3396 | return hclge_update_tp_port_info(hdev); |
3397 | |
3398 | /* if IMP does not support get SFP/qSFP info, return directly */ |
3399 | if (!hdev->support_sfp_query) |
3400 | return 0; |
3401 | |
3402 | if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
3403 | speed = mac->speed; |
3404 | ret = hclge_get_sfp_info(hdev, mac); |
3405 | } else { |
3406 | speed = HCLGE_MAC_SPEED_UNKNOWN; |
		ret = hclge_get_sfp_speed(hdev, &speed);
3408 | } |
3409 | |
3410 | if (ret == -EOPNOTSUPP) { |
3411 | hdev->support_sfp_query = false; |
3412 | return ret; |
3413 | } else if (ret) { |
3414 | return ret; |
3415 | } |
3416 | |
3417 | if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
3418 | if (mac->speed_type == QUERY_ACTIVE_SPEED) { |
3419 | hclge_update_port_capability(hdev, mac); |
3420 | if (mac->speed != speed) |
3421 | (void)hclge_tm_port_shaper_cfg(hdev); |
3422 | return 0; |
3423 | } |
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL, mac->lane_num);
3426 | } else { |
3427 | if (speed == HCLGE_MAC_SPEED_UNKNOWN) |
3428 | return 0; /* do nothing if no SFP */ |
3429 | |
3430 | /* must config full duplex for SFP */ |
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3432 | } |
3433 | } |
3434 | |
3435 | static int hclge_get_status(struct hnae3_handle *handle) |
3436 | { |
3437 | struct hclge_vport *vport = hclge_get_vport(handle); |
3438 | struct hclge_dev *hdev = vport->back; |
3439 | |
3440 | hclge_update_link_status(hdev); |
3441 | |
3442 | return hdev->hw.mac.link; |
3443 | } |
3444 | |
3445 | struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) |
3446 | { |
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3450 | return NULL; |
3451 | } |
3452 | |
	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3456 | vf, pci_num_vf(hdev->pdev)); |
3457 | return NULL; |
3458 | } |
3459 | |
	/* VFs start from 1 in the vport array */
3461 | vf += HCLGE_VF_VPORT_START_NUM; |
3462 | return &hdev->vport[vf]; |
3463 | } |
3464 | |
3465 | static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, |
3466 | struct ifla_vf_info *ivf) |
3467 | { |
3468 | struct hclge_vport *vport = hclge_get_vport(handle); |
3469 | struct hclge_dev *hdev = vport->back; |
3470 | |
3471 | vport = hclge_get_vf_vport(hdev, vf); |
3472 | if (!vport) |
3473 | return -EINVAL; |
3474 | |
3475 | ivf->vf = vf; |
3476 | ivf->linkstate = vport->vf_info.link_state; |
3477 | ivf->spoofchk = vport->vf_info.spoofchk; |
3478 | ivf->trusted = vport->vf_info.trusted; |
3479 | ivf->min_tx_rate = 0; |
3480 | ivf->max_tx_rate = vport->vf_info.max_tx_rate; |
3481 | ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; |
3482 | ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); |
3483 | ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; |
	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3485 | |
3486 | return 0; |
3487 | } |
3488 | |
3489 | static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, |
3490 | int link_state) |
3491 | { |
3492 | struct hclge_vport *vport = hclge_get_vport(handle); |
3493 | struct hclge_dev *hdev = vport->back; |
3494 | int link_state_old; |
3495 | int ret; |
3496 | |
3497 | vport = hclge_get_vf_vport(hdev, vf); |
3498 | if (!vport) |
3499 | return -EINVAL; |
3500 | |
3501 | link_state_old = vport->vf_info.link_state; |
3502 | vport->vf_info.link_state = link_state; |
3503 | |
	/* return success directly if the VF is not alive; the VF will
	 * query the link state itself when it starts working.
	 */
3507 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) |
3508 | return 0; |
3509 | |
3510 | ret = hclge_push_vf_link_status(vport); |
3511 | if (ret) { |
3512 | vport->vf_info.link_state = link_state_old; |
3513 | dev_err(&hdev->pdev->dev, |
3514 | "failed to push vf%d link status, ret = %d\n" , vf, ret); |
3515 | } |
3516 | |
3517 | return ret; |
3518 | } |
3519 | |
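/* Decode the vector0 interrupt cause in priority order: IMP/global
 * reset first, then MSI-X/RAS hardware errors, then PTP, then mailbox
 * (CMDQ RX). For acknowledgeable events, *clearval returns the source
 * bits the caller must write back to clear the interrupt.
 */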
3520 | static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) |
3521 | { |
3522 | u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; |
3523 | |
3524 | /* fetch the events from their corresponding regs */ |
3525 | cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); |
3526 | msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
3527 | hw_err_src_reg = hclge_read_dev(&hdev->hw, |
3528 | HCLGE_RAS_PF_OTHER_INT_STS_REG); |
3529 | |
	/* Assumption: if reset and mailbox events happen to be reported
	 * together, we will only process the reset event in this pass and
	 * defer the mailbox events. Since we would not have cleared the
	 * RX CMDQ event this time, we would receive another interrupt from
	 * H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3538 | if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { |
3539 | dev_info(&hdev->pdev->dev, "IMP reset interrupt\n" ); |
3540 | set_bit(nr: HNAE3_IMP_RESET, addr: &hdev->reset_pending); |
3541 | set_bit(nr: HCLGE_COMM_STATE_CMD_DISABLE, addr: &hdev->hw.hw.comm_state); |
3542 | *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); |
3543 | hdev->rst_stats.imp_rst_cnt++; |
3544 | return HCLGE_VECTOR0_EVENT_RST; |
3545 | } |
3546 | |
3547 | if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { |
3548 | dev_info(&hdev->pdev->dev, "global reset interrupt\n" ); |
3549 | set_bit(nr: HCLGE_COMM_STATE_CMD_DISABLE, addr: &hdev->hw.hw.comm_state); |
3550 | set_bit(nr: HNAE3_GLOBAL_RESET, addr: &hdev->reset_pending); |
3551 | *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); |
3552 | hdev->rst_stats.global_rst_cnt++; |
3553 | return HCLGE_VECTOR0_EVENT_RST; |
3554 | } |
3555 | |
3556 | /* check for vector0 msix event and hardware error event source */ |
3557 | if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || |
3558 | hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) |
3559 | return HCLGE_VECTOR0_EVENT_ERR; |
3560 | |
3561 | /* check for vector0 ptp event source */ |
3562 | if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { |
3563 | *clearval = msix_src_reg; |
3564 | return HCLGE_VECTOR0_EVENT_PTP; |
3565 | } |
3566 | |
3567 | /* check for vector0 mailbox(=CMDQ RX) event source */ |
3568 | if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { |
3569 | cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); |
3570 | *clearval = cmdq_src_reg; |
3571 | return HCLGE_VECTOR0_EVENT_MBX; |
3572 | } |
3573 | |
3574 | /* print other vector0 event source */ |
3575 | dev_info(&hdev->pdev->dev, |
3576 | "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n" , |
3577 | cmdq_src_reg, hw_err_src_reg, msix_src_reg); |
3578 | |
3579 | return HCLGE_VECTOR0_EVENT_OTHER; |
3580 | } |
3581 | |
3582 | static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, |
3583 | u32 regclr) |
3584 | { |
3585 | #define HCLGE_IMP_RESET_DELAY 5 |
3586 | |
3587 | switch (event_type) { |
3588 | case HCLGE_VECTOR0_EVENT_PTP: |
3589 | case HCLGE_VECTOR0_EVENT_RST: |
3590 | if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) |
3591 | mdelay(HCLGE_IMP_RESET_DELAY); |
3592 | |
3593 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); |
3594 | break; |
3595 | case HCLGE_VECTOR0_EVENT_MBX: |
3596 | hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); |
3597 | break; |
3598 | default: |
3599 | break; |
3600 | } |
3601 | } |
3602 | |
3603 | static void hclge_clear_all_event_cause(struct hclge_dev *hdev) |
3604 | { |
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3610 | } |
3611 | |
3612 | static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) |
3613 | { |
	writel(enable ? 1 : 0, vector->addr);
3615 | } |
3616 | |
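/* Misc (vector0) IRQ handler. The vector is masked on entry and only
 * re-enabled here for PTP, mailbox and unknown causes; for reset and
 * error events it stays masked until the scheduled service task
 * re-enables it after handling.
 */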
3617 | static irqreturn_t hclge_misc_irq_handle(int irq, void *data) |
3618 | { |
3619 | struct hclge_dev *hdev = data; |
3620 | unsigned long flags; |
3621 | u32 clearval = 0; |
3622 | u32 event_cause; |
3623 | |
	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);
3626 | |
3627 | /* vector 0 interrupt is shared with reset and mailbox source events. */ |
3628 | switch (event_cause) { |
3629 | case HCLGE_VECTOR0_EVENT_ERR: |
3630 | hclge_errhand_task_schedule(hdev); |
3631 | break; |
3632 | case HCLGE_VECTOR0_EVENT_RST: |
3633 | hclge_reset_task_schedule(hdev); |
3634 | break; |
3635 | case HCLGE_VECTOR0_EVENT_PTP: |
3636 | spin_lock_irqsave(&hdev->ptp->lock, flags); |
3637 | hclge_ptp_clean_tx_hwts(hdev); |
		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3639 | break; |
3640 | case HCLGE_VECTOR0_EVENT_MBX: |
		/* If we are here then either:
		 * 1. we are not handling any mbx task and we are not
		 *    scheduled to do so, or
		 * 2. we could be handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task, as there
		 * are more mbx messages reported by this interrupt.
		 */
3650 | hclge_mbx_task_schedule(hdev); |
3651 | break; |
3652 | default: |
3653 | dev_warn(&hdev->pdev->dev, |
3654 | "received unknown or unhandled event of vector0\n" ); |
3655 | break; |
3656 | } |
3657 | |
	hclge_clear_event_cause(hdev, event_cause, clearval);
3659 | |
3660 | /* Enable interrupt if it is not caused by reset event or error event */ |
3661 | if (event_cause == HCLGE_VECTOR0_EVENT_PTP || |
3662 | event_cause == HCLGE_VECTOR0_EVENT_MBX || |
3663 | event_cause == HCLGE_VECTOR0_EVENT_OTHER) |
		hclge_enable_vector(&hdev->misc_vector, true);
3665 | |
3666 | return IRQ_HANDLED; |
3667 | } |
3668 | |
3669 | static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) |
3670 | { |
3671 | if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { |
3672 | dev_warn(&hdev->pdev->dev, |
3673 | "vector(vector_id %d) has been freed.\n" , vector_id); |
3674 | return; |
3675 | } |
3676 | |
3677 | hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; |
3678 | hdev->num_msi_left += 1; |
3679 | hdev->num_msi_used -= 1; |
3680 | } |
3681 | |
3682 | static void hclge_get_misc_vector(struct hclge_dev *hdev) |
3683 | { |
3684 | struct hclge_misc_vector *vector = &hdev->misc_vector; |
3685 | |
	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3687 | |
3688 | vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; |
3689 | hdev->vector_status[0] = 0; |
3690 | |
3691 | hdev->num_msi_left -= 1; |
3692 | hdev->num_msi_used += 1; |
3693 | } |
3694 | |
3695 | static int hclge_misc_irq_init(struct hclge_dev *hdev) |
3696 | { |
3697 | int ret; |
3698 | |
3699 | hclge_get_misc_vector(hdev); |
3700 | |
3701 | /* this would be explicitly freed in the end */ |
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
3710 | } |
3711 | |
3712 | return ret; |
3713 | } |
3714 | |
3715 | static void hclge_misc_irq_uninit(struct hclge_dev *hdev) |
3716 | { |
3717 | free_irq(hdev->misc_vector.vector_irq, hdev); |
	hclge_free_vector(hdev, 0);
3719 | } |
3720 | |
3721 | int hclge_notify_client(struct hclge_dev *hdev, |
3722 | enum hnae3_reset_notify_type type) |
3723 | { |
3724 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
3725 | struct hnae3_client *client = hdev->nic_client; |
3726 | int ret; |
3727 | |
3728 | if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) |
3729 | return 0; |
3730 | |
3731 | if (!client->ops->reset_notify) |
3732 | return -EOPNOTSUPP; |
3733 | |
3734 | ret = client->ops->reset_notify(handle, type); |
3735 | if (ret) |
3736 | dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n" , |
3737 | type, ret); |
3738 | |
3739 | return ret; |
3740 | } |
3741 | |
3742 | static int hclge_notify_roce_client(struct hclge_dev *hdev, |
3743 | enum hnae3_reset_notify_type type) |
3744 | { |
3745 | struct hnae3_handle *handle = &hdev->vport[0].roce; |
3746 | struct hnae3_client *client = hdev->roce_client; |
3747 | int ret; |
3748 | |
3749 | if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) |
3750 | return 0; |
3751 | |
3752 | if (!client->ops->reset_notify) |
3753 | return -EOPNOTSUPP; |
3754 | |
3755 | ret = client->ops->reset_notify(handle, type); |
3756 | if (ret) |
3757 | dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)" , |
3758 | type, ret); |
3759 | |
3760 | return ret; |
3761 | } |
3762 | |
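/* Poll the reset status register matching the current reset type,
 * sleeping 100 ms between reads for up to 350 attempts (roughly 35
 * seconds) before giving up with -EBUSY.
 */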
3763 | static int hclge_reset_wait(struct hclge_dev *hdev) |
3764 | { |
#define HCLGE_RESET_WAIT_MS	100
3766 | #define HCLGE_RESET_WAIT_CNT 350 |
3767 | |
3768 | u32 val, reg, reg_bit; |
3769 | u32 cnt = 0; |
3770 | |
3771 | switch (hdev->reset_type) { |
3772 | case HNAE3_IMP_RESET: |
3773 | reg = HCLGE_GLOBAL_RESET_REG; |
3774 | reg_bit = HCLGE_IMP_RESET_BIT; |
3775 | break; |
3776 | case HNAE3_GLOBAL_RESET: |
3777 | reg = HCLGE_GLOBAL_RESET_REG; |
3778 | reg_bit = HCLGE_GLOBAL_RESET_BIT; |
3779 | break; |
3780 | case HNAE3_FUNC_RESET: |
3781 | reg = HCLGE_FUN_RST_ING; |
3782 | reg_bit = HCLGE_FUN_RST_ING_B; |
3783 | break; |
3784 | default: |
3785 | dev_err(&hdev->pdev->dev, |
3786 | "Wait for unsupported reset type: %d\n" , |
3787 | hdev->reset_type); |
3788 | return -EINVAL; |
3789 | } |
3790 | |
3791 | val = hclge_read_dev(&hdev->hw, reg); |
3792 | while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { |
		msleep(HCLGE_RESET_WAIT_MS);
3794 | val = hclge_read_dev(&hdev->hw, reg); |
3795 | cnt++; |
3796 | } |
3797 | |
3798 | if (cnt >= HCLGE_RESET_WAIT_CNT) { |
3799 | dev_warn(&hdev->pdev->dev, |
3800 | "Wait for reset timeout: %d\n" , hdev->reset_type); |
3801 | return -EBUSY; |
3802 | } |
3803 | |
3804 | return 0; |
3805 | } |
3806 | |
3807 | static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) |
3808 | { |
3809 | struct hclge_vf_rst_cmd *req; |
3810 | struct hclge_desc desc; |
3811 | |
3812 | req = (struct hclge_vf_rst_cmd *)desc.data; |
3813 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); |
3814 | req->dest_vfid = func_id; |
3815 | |
3816 | if (reset) |
3817 | req->vf_rst = 0x1; |
3818 | |
	return hclge_cmd_send(&hdev->hw, &desc, 1);
3820 | } |
3821 | |
3822 | static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) |
3823 | { |
3824 | int i; |
3825 | |
3826 | for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { |
3827 | struct hclge_vport *vport = &hdev->vport[i]; |
3828 | int ret; |
3829 | |
3830 | /* Send cmd to set/clear VF's FUNC_RST_ING */ |
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%u) rst failed %d!\n",
3835 | vport->vport_id - HCLGE_VF_VPORT_START_NUM, |
3836 | ret); |
3837 | return ret; |
3838 | } |
3839 | |
3840 | if (!reset || |
3841 | !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) |
3842 | continue; |
3843 | |
3844 | if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && |
3845 | hdev->reset_type == HNAE3_FUNC_RESET) { |
			set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
				&vport->need_notify);
3848 | continue; |
3849 | } |
3850 | |
3851 | /* Inform VF to process the reset. |
3852 | * hclge_inform_reset_assert_to_vf may fail if VF |
3853 | * driver is not loaded. |
3854 | */ |
3855 | ret = hclge_inform_reset_assert_to_vf(vport); |
3856 | if (ret) |
3857 | dev_warn(&hdev->pdev->dev, |
3858 | "inform reset to vf(%u) failed %d!\n" , |
3859 | vport->vport_id - HCLGE_VF_VPORT_START_NUM, |
3860 | ret); |
3861 | } |
3862 | |
3863 | return 0; |
3864 | } |
3865 | |
3866 | static void hclge_mailbox_service_task(struct hclge_dev *hdev) |
3867 | { |
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3871 | return; |
3872 | |
3873 | if (time_is_before_jiffies(hdev->last_mbx_scheduled + |
3874 | HCLGE_MBX_SCHED_TIMEOUT)) |
3875 | dev_warn(&hdev->pdev->dev, |
3876 | "mbx service task is scheduled after %ums on cpu%u!\n" , |
3877 | jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), |
3878 | smp_processor_id()); |
3879 | |
3880 | hclge_mbx_handler(hdev); |
3881 | |
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3883 | } |
3884 | |
3885 | static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) |
3886 | { |
3887 | struct hclge_pf_rst_sync_cmd *req; |
3888 | struct hclge_desc desc; |
3889 | int cnt = 0; |
3890 | int ret; |
3891 | |
3892 | req = (struct hclge_pf_rst_sync_cmd *)desc.data; |
3893 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); |
3894 | |
3895 | do { |
		/* the VF needs to bring its netdev down via mbx during PF or
		 * FLR reset
		 */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for VF to stop IO
		 */
3903 | if (ret == -EOPNOTSUPP) { |
3904 | msleep(HCLGE_RESET_SYNC_TIME); |
3905 | return; |
3906 | } else if (ret) { |
3907 | dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n" , |
3908 | ret); |
3909 | return; |
3910 | } else if (req->all_vf_ready) { |
3911 | return; |
3912 | } |
3913 | msleep(HCLGE_PF_RESET_SYNC_TIME); |
		hclge_comm_cmd_reuse_desc(&desc, true);
3915 | } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); |
3916 | |
3917 | dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n" ); |
3918 | } |
3919 | |
3920 | void hclge_report_hw_error(struct hclge_dev *hdev, |
3921 | enum hnae3_hw_error_type type) |
3922 | { |
3923 | struct hnae3_client *client = hdev->nic_client; |
3924 | |
3925 | if (!client || !client->ops->process_hw_error || |
3926 | !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) |
3927 | return; |
3928 | |
3929 | client->ops->process_hw_error(&hdev->vport[0].nic, type); |
3930 | } |
3931 | |
3932 | static void hclge_handle_imp_error(struct hclge_dev *hdev) |
3933 | { |
3934 | u32 reg_val; |
3935 | |
3936 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); |
3937 | if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { |
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3939 | reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); |
3940 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); |
3941 | } |
3942 | |
3943 | if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { |
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3945 | reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); |
3946 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); |
3947 | } |
3948 | } |
3949 | |
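/* Ask firmware to trigger a function-level reset of the given function;
 * func_id 0 addresses the PF itself, VFs are addressed by their vport
 * id starting from 1.
 */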
3950 | int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) |
3951 | { |
3952 | struct hclge_desc desc; |
3953 | struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; |
3954 | int ret; |
3955 | |
3956 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); |
3957 | hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); |
3958 | req->fun_reset_vfid = func_id; |
3959 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3964 | |
3965 | return ret; |
3966 | } |
3967 | |
3968 | static void hclge_do_reset(struct hclge_dev *hdev) |
3969 | { |
3970 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
3971 | struct pci_dev *pdev = hdev->pdev; |
3972 | u32 val; |
3973 | |
3974 | if (hclge_get_hw_reset_stat(handle)) { |
3975 | dev_info(&pdev->dev, "hardware reset not finish\n" ); |
3976 | dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n" , |
3977 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), |
3978 | hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); |
3979 | return; |
3980 | } |
3981 | |
3982 | switch (hdev->reset_type) { |
3983 | case HNAE3_IMP_RESET: |
3984 | dev_info(&pdev->dev, "IMP reset requested\n" ); |
3985 | val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); |
3986 | hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); |
3987 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); |
3988 | break; |
3989 | case HNAE3_GLOBAL_RESET: |
3990 | dev_info(&pdev->dev, "global reset requested\n" ); |
3991 | val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); |
3992 | hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); |
3993 | hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); |
3994 | break; |
3995 | case HNAE3_FUNC_RESET: |
3996 | dev_info(&pdev->dev, "PF reset requested\n" ); |
3997 | /* schedule again to check later */ |
3998 | set_bit(nr: HNAE3_FUNC_RESET, addr: &hdev->reset_pending); |
3999 | hclge_reset_task_schedule(hdev); |
4000 | break; |
4001 | default: |
4002 | dev_warn(&pdev->dev, |
4003 | "unsupported reset type: %d\n" , hdev->reset_type); |
4004 | break; |
4005 | } |
4006 | } |
4007 | |
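/* Pick the highest-priority pending reset in *addr (IMP > global >
 * func > FLR) and clear the lower-priority requests it supersedes.
 * Returns HNAE3_NONE_RESET when a higher-level reset is already being
 * handled, so the lower request is deferred rather than processed.
 */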
4008 | static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, |
4009 | unsigned long *addr) |
4010 | { |
4011 | enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; |
4012 | struct hclge_dev *hdev = ae_dev->priv; |
4013 | |
4014 | /* return the highest priority reset level amongst all */ |
4015 | if (test_bit(HNAE3_IMP_RESET, addr)) { |
4016 | rst_level = HNAE3_IMP_RESET; |
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
4020 | } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { |
4021 | rst_level = HNAE3_GLOBAL_RESET; |
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
4024 | } else if (test_bit(HNAE3_FUNC_RESET, addr)) { |
4025 | rst_level = HNAE3_FUNC_RESET; |
		clear_bit(HNAE3_FUNC_RESET, addr);
4027 | } else if (test_bit(HNAE3_FLR_RESET, addr)) { |
4028 | rst_level = HNAE3_FLR_RESET; |
		clear_bit(HNAE3_FLR_RESET, addr);
4030 | } |
4031 | |
4032 | if (hdev->reset_type != HNAE3_NONE_RESET && |
4033 | rst_level < hdev->reset_type) |
4034 | return HNAE3_NONE_RESET; |
4035 | |
4036 | return rst_level; |
4037 | } |
4038 | |
4039 | static void hclge_clear_reset_cause(struct hclge_dev *hdev) |
4040 | { |
4041 | u32 clearval = 0; |
4042 | |
4043 | switch (hdev->reset_type) { |
4044 | case HNAE3_IMP_RESET: |
4045 | clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); |
4046 | break; |
4047 | case HNAE3_GLOBAL_RESET: |
4048 | clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); |
4049 | break; |
4050 | default: |
4051 | break; |
4052 | } |
4053 | |
4054 | if (!clearval) |
4055 | return; |
4056 | |
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
4060 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
4061 | hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, |
4062 | clearval); |
4063 | |
	hclge_enable_vector(&hdev->misc_vector, true);
4065 | } |
4066 | |
4067 | static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) |
4068 | { |
4069 | u32 reg_val; |
4070 | |
4071 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); |
4072 | if (enable) |
4073 | reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; |
4074 | else |
4075 | reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; |
4076 | |
4077 | hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); |
4078 | } |
4079 | |
4080 | static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) |
4081 | { |
4082 | int ret; |
4083 | |
	ret = hclge_set_all_vf_rst(hdev, true);
4085 | if (ret) |
4086 | return ret; |
4087 | |
4088 | hclge_func_reset_sync_vf(hdev); |
4089 | |
4090 | return 0; |
4091 | } |
4092 | |
4093 | static int hclge_reset_prepare_wait(struct hclge_dev *hdev) |
4094 | { |
4095 | u32 reg_val; |
4096 | int ret = 0; |
4097 | |
4098 | switch (hdev->reset_type) { |
4099 | case HNAE3_FUNC_RESET: |
4100 | ret = hclge_func_reset_notify_vf(hdev); |
4101 | if (ret) |
4102 | return ret; |
4103 | |
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
4108 | return ret; |
4109 | } |
4110 | |
		/* After performing a PF reset, it is not necessary to do
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4117 | hdev->rst_stats.pf_rst_cnt++; |
4118 | break; |
4119 | case HNAE3_FLR_RESET: |
4120 | ret = hclge_func_reset_notify_vf(hdev); |
4121 | if (ret) |
4122 | return ret; |
4123 | break; |
4124 | case HNAE3_IMP_RESET: |
4125 | hclge_handle_imp_error(hdev); |
4126 | reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); |
4127 | hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, |
4128 | BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); |
4129 | break; |
4130 | default: |
4131 | break; |
4132 | } |
4133 | |
4134 | /* inform hardware that preparatory work is done */ |
4135 | msleep(HCLGE_RESET_SYNC_TIME); |
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4138 | |
4139 | return ret; |
4140 | } |
4141 | |
4142 | static void hclge_show_rst_info(struct hclge_dev *hdev) |
4143 | { |
4144 | char *buf; |
4145 | |
4146 | buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); |
4147 | if (!buf) |
4148 | return; |
4149 | |
4150 | hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); |
4151 | |
4152 | dev_info(&hdev->pdev->dev, "dump reset info:\n%s" , buf); |
4153 | |
	kfree(buf);
4155 | } |
4156 | |
4157 | static bool hclge_reset_err_handle(struct hclge_dev *hdev) |
4158 | { |
4159 | #define MAX_RESET_FAIL_CNT 5 |
4160 | |
4161 | if (hdev->reset_pending) { |
4162 | dev_info(&hdev->pdev->dev, "Reset pending %lu\n" , |
4163 | hdev->reset_pending); |
4164 | return true; |
4165 | } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & |
4166 | HCLGE_RESET_INT_M) { |
4167 | dev_info(&hdev->pdev->dev, |
4168 | "reset failed because new reset interrupt\n" ); |
4169 | hclge_clear_reset_cause(hdev); |
4170 | return false; |
4171 | } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { |
4172 | hdev->rst_stats.reset_fail_cnt++; |
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
4176 | hdev->rst_stats.reset_fail_cnt); |
4177 | return true; |
4178 | } |
4179 | |
4180 | hclge_clear_reset_cause(hdev); |
4181 | |
4182 | /* recover the handshake status when reset fail */ |
	hclge_reset_handshake(hdev, true);
4184 | |
4185 | dev_err(&hdev->pdev->dev, "Reset fail!\n" ); |
4186 | |
4187 | hclge_show_rst_info(hdev); |
4188 | |
	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4190 | |
4191 | return false; |
4192 | } |
4193 | |
4194 | static void hclge_update_reset_level(struct hclge_dev *hdev) |
4195 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4197 | enum hnae3_reset_type reset_level; |
4198 | |
4199 | /* reset request will not be set during reset, so clear |
4200 | * pending reset request to avoid unnecessary reset |
4201 | * caused by the same reason. |
4202 | */ |
	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4204 | |
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
4213 | } |
4214 | |
4215 | static int hclge_set_rst_done(struct hclge_dev *hdev) |
4216 | { |
4217 | struct hclge_pf_rst_done_cmd *req; |
4218 | struct hclge_desc desc; |
4219 | int ret; |
4220 | |
4221 | req = (struct hclge_pf_rst_done_cmd *)desc.data; |
4222 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); |
4223 | req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; |
4224 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4226 | /* To be compatible with the old firmware, which does not support |
4227 | * command HCLGE_OPC_PF_RST_DONE, just print a warning and |
4228 | * return success |
4229 | */ |
4230 | if (ret == -EOPNOTSUPP) { |
4231 | dev_warn(&hdev->pdev->dev, |
4232 | "current firmware does not support command(0x%x)!\n" , |
4233 | HCLGE_OPC_PF_RST_DONE); |
4234 | return 0; |
4235 | } else if (ret) { |
4236 | dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n" , |
4237 | ret); |
4238 | } |
4239 | |
4240 | return ret; |
4241 | } |
4242 | |
4243 | static int hclge_reset_prepare_up(struct hclge_dev *hdev) |
4244 | { |
4245 | int ret = 0; |
4246 | |
4247 | switch (hdev->reset_type) { |
4248 | case HNAE3_FUNC_RESET: |
4249 | case HNAE3_FLR_RESET: |
		ret = hclge_set_all_vf_rst(hdev, false);
4251 | break; |
4252 | case HNAE3_GLOBAL_RESET: |
4253 | case HNAE3_IMP_RESET: |
4254 | ret = hclge_set_rst_done(hdev); |
4255 | break; |
4256 | default: |
4257 | break; |
4258 | } |
4259 | |
4260 | /* clear up the handshake status after re-initialize done */ |
	hclge_reset_handshake(hdev, false);
4262 | |
4263 | return ret; |
4264 | } |
4265 | |
4266 | static int hclge_reset_stack(struct hclge_dev *hdev) |
4267 | { |
4268 | int ret; |
4269 | |
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4271 | if (ret) |
4272 | return ret; |
4273 | |
	ret = hclge_reset_ae_dev(hdev->ae_dev);
4275 | if (ret) |
4276 | return ret; |
4277 | |
	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4279 | } |
4280 | |
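/* First half of the reset sequence: ask the RoCE and NIC clients to
 * stop traffic, then assert the reset and perform the hardware
 * handshake; hclge_reset_wait() and hclge_reset_rebuild() complete the
 * flow once the hardware is done.
 */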
4281 | static int hclge_reset_prepare(struct hclge_dev *hdev) |
4282 | { |
4283 | int ret; |
4284 | |
4285 | hdev->rst_stats.reset_cnt++; |
4286 | /* perform reset of the stack & ae device for a client */ |
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4288 | if (ret) |
4289 | return ret; |
4290 | |
4291 | rtnl_lock(); |
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4293 | rtnl_unlock(); |
4294 | if (ret) |
4295 | return ret; |
4296 | |
4297 | return hclge_reset_prepare_wait(hdev); |
4298 | } |
4299 | |
4300 | static int hclge_reset_rebuild(struct hclge_dev *hdev) |
4301 | { |
4302 | int ret; |
4303 | |
4304 | hdev->rst_stats.hw_reset_done_cnt++; |
4305 | |
	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4307 | if (ret) |
4308 | return ret; |
4309 | |
4310 | rtnl_lock(); |
4311 | ret = hclge_reset_stack(hdev); |
4312 | rtnl_unlock(); |
4313 | if (ret) |
4314 | return ret; |
4315 | |
4316 | hclge_clear_reset_cause(hdev); |
4317 | |
	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4319 | /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 |
4320 | * times |
4321 | */ |
4322 | if (ret && |
4323 | hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) |
4324 | return ret; |
4325 | |
4326 | ret = hclge_reset_prepare_up(hdev); |
4327 | if (ret) |
4328 | return ret; |
4329 | |
4330 | rtnl_lock(); |
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4332 | rtnl_unlock(); |
4333 | if (ret) |
4334 | return ret; |
4335 | |
	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4337 | if (ret) |
4338 | return ret; |
4339 | |
4340 | hdev->last_reset_time = jiffies; |
4341 | hdev->rst_stats.reset_fail_cnt = 0; |
4342 | hdev->rst_stats.reset_done_cnt++; |
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4344 | |
4345 | hclge_update_reset_level(hdev); |
4346 | |
4347 | return 0; |
4348 | } |
4349 | |
4350 | static void hclge_reset(struct hclge_dev *hdev) |
4351 | { |
4352 | if (hclge_reset_prepare(hdev)) |
4353 | goto err_reset; |
4354 | |
4355 | if (hclge_reset_wait(hdev)) |
4356 | goto err_reset; |
4357 | |
4358 | if (hclge_reset_rebuild(hdev)) |
4359 | goto err_reset; |
4360 | |
4361 | return; |
4362 | |
4363 | err_reset: |
4364 | if (hclge_reset_err_handle(hdev)) |
4365 | hclge_reset_task_schedule(hdev); |
4366 | } |
4367 | |
4368 | static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) |
4369 | { |
4370 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
4371 | struct hclge_dev *hdev = ae_dev->priv; |
4372 | |
	/* We might end up getting called broadly because of 2 below cases:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so let us check after
	 * a sufficiently large time, say 4*5*HZ). In case of a new request
	 * we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one, we
	 * want to make sure we throttle the reset request. Therefore, we
	 * will not allow it again before 3*HZ times.
	 */
4387 | |
4388 | if (time_before(jiffies, (hdev->last_reset_time + |
4389 | HCLGE_RESET_INTERVAL))) { |
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4391 | return; |
4392 | } |
4393 | |
4394 | if (hdev->default_reset_request) { |
4395 | hdev->reset_level = |
4396 | hclge_get_reset_level(ae_dev, |
					      &hdev->default_reset_request);
4398 | } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { |
4399 | hdev->reset_level = HNAE3_FUNC_RESET; |
4400 | } |
4401 | |
4402 | dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n" , |
4403 | hdev->reset_level); |
4404 | |
4405 | /* request reset & schedule reset task */ |
	set_bit(hdev->reset_level, &hdev->reset_request);
4407 | hclge_reset_task_schedule(hdev); |
4408 | |
4409 | if (hdev->reset_level < HNAE3_GLOBAL_RESET) |
4410 | hdev->reset_level++; |
4411 | } |
4412 | |
4413 | static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, |
4414 | enum hnae3_reset_type rst_type) |
4415 | { |
4416 | struct hclge_dev *hdev = ae_dev->priv; |
4417 | |
	set_bit(rst_type, &hdev->default_reset_request);
4419 | } |
4420 | |
4421 | static void hclge_reset_timer(struct timer_list *t) |
4422 | { |
4423 | struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); |
4424 | |
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4428 | if (!hdev->default_reset_request) |
4429 | return; |
4430 | |
4431 | dev_info(&hdev->pdev->dev, |
4432 | "triggering reset in reset timer\n" ); |
4433 | hclge_reset_event(pdev: hdev->pdev, NULL); |
4434 | } |
4435 | |
4436 | static void hclge_reset_subtask(struct hclge_dev *hdev) |
4437 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4439 | |
4440 | /* check if there is any ongoing reset in the hardware. This status can |
4441 | * be checked from reset_pending. If there is then, we need to wait for |
4442 | * hardware to complete reset. |
4443 | * a. If we are able to figure out in reasonable time that hardware |
	 * has fully reset then, we can proceed with driver, client
4445 | * reset. |
4446 | * b. else, we can come back later to check this status so re-sched |
4447 | * now. |
4448 | */ |
4449 | hdev->last_reset_time = jiffies; |
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4451 | if (hdev->reset_type != HNAE3_NONE_RESET) |
4452 | hclge_reset(hdev); |
4453 | |
4454 | /* check if we got any *new* reset requests to be honored */ |
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4456 | if (hdev->reset_type != HNAE3_NONE_RESET) |
4457 | hclge_do_reset(hdev); |
4458 | |
4459 | hdev->reset_type = HNAE3_NONE_RESET; |
4460 | } |
4461 | |
4462 | static void hclge_handle_err_reset_request(struct hclge_dev *hdev) |
4463 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4465 | enum hnae3_reset_type reset_type; |
4466 | |
4467 | if (ae_dev->hw_err_reset_req) { |
4468 | reset_type = hclge_get_reset_level(ae_dev, |
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
4471 | } |
4472 | |
4473 | if (hdev->default_reset_request && ae_dev->ops->reset_event) |
4474 | ae_dev->ops->reset_event(hdev->pdev, NULL); |
4475 | |
4476 | /* enable interrupt after error handling complete */ |
	hclge_enable_vector(&hdev->misc_vector, true);
4478 | } |
4479 | |
4480 | static void hclge_handle_err_recovery(struct hclge_dev *hdev) |
4481 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4483 | |
4484 | ae_dev->hw_err_reset_req = 0; |
4485 | |
4486 | if (hclge_find_error_source(hdev)) { |
4487 | hclge_handle_error_info_log(ae_dev); |
4488 | hclge_handle_mac_tnl(hdev); |
4489 | hclge_handle_vf_queue_err_ras(hdev); |
4490 | } |
4491 | |
4492 | hclge_handle_err_reset_request(hdev); |
4493 | } |
4494 | |
4495 | static void hclge_misc_err_recovery(struct hclge_dev *hdev) |
4496 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4498 | struct device *dev = &hdev->pdev->dev; |
4499 | u32 msix_sts_reg; |
4500 | |
4501 | msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); |
4502 | if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { |
4503 | if (hclge_handle_hw_msix_error |
		    (hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
4506 | msix_sts_reg); |
4507 | } |
4508 | |
4509 | hclge_handle_hw_ras_error(ae_dev); |
4510 | |
4511 | hclge_handle_err_reset_request(hdev); |
4512 | } |
4513 | |
4514 | static void hclge_errhand_service_task(struct hclge_dev *hdev) |
4515 | { |
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4517 | return; |
4518 | |
4519 | if (hnae3_dev_ras_imp_supported(hdev)) |
4520 | hclge_handle_err_recovery(hdev); |
4521 | else |
4522 | hclge_misc_err_recovery(hdev); |
4523 | } |
4524 | |
4525 | static void hclge_reset_service_task(struct hclge_dev *hdev) |
4526 | { |
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4528 | return; |
4529 | |
4530 | if (time_is_before_jiffies(hdev->last_rst_scheduled + |
4531 | HCLGE_RESET_SCHED_TIMEOUT)) |
4532 | dev_warn(&hdev->pdev->dev, |
4533 | "reset service task is scheduled after %ums on cpu%u!\n" , |
4534 | jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), |
4535 | smp_processor_id()); |
4536 | |
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4539 | |
4540 | hclge_reset_subtask(hdev); |
4541 | |
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
4544 | } |
4545 | |
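/* Mark a VF vport as no longer alive when no keep-alive has been seen
 * for HCLGE_ALIVE_SECONDS_NORMAL seconds; vport 0 is the PF and is
 * skipped since it is always alive.
 */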
4546 | static void hclge_update_vport_alive(struct hclge_dev *hdev) |
4547 | { |
4548 | #define HCLGE_ALIVE_SECONDS_NORMAL 8 |
4549 | |
4550 | unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; |
4551 | int i; |
4552 | |
	/* start from vport 1; the PF is always alive */
4554 | for (i = 1; i < hdev->num_alloc_vport; i++) { |
4555 | struct hclge_vport *vport = &hdev->vport[i]; |
4556 | |
4557 | if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || |
4558 | !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) |
4559 | continue; |
4560 | if (time_after(jiffies, vport->last_active_jiffies + |
4561 | alive_time)) { |
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
			dev_warn(&hdev->pdev->dev,
				 "VF %u heartbeat timeout\n",
4565 | i - HCLGE_VF_VPORT_START_NUM); |
4566 | } |
4567 | } |
4568 | } |
4569 | |
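/* Periodic work: link, MAC table, promisc and FD sync run on every
 * pass, while the heavier per-second work (stats, port info, VLAN sync,
 * ARFS expiry) is rate-limited; a pass that runs again within one
 * second only reschedules itself for the remainder of that second.
 */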
4570 | static void hclge_periodic_service_task(struct hclge_dev *hdev) |
4571 | { |
4572 | unsigned long delta = round_jiffies_relative(HZ); |
4573 | |
4574 | if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) |
4575 | return; |
4576 | |
4577 | /* Always handle the link updating to make sure link state is |
4578 | * updated when it is triggered by mbx. |
4579 | */ |
4580 | hclge_update_link_status(hdev); |
4581 | hclge_sync_mac_table(hdev); |
4582 | hclge_sync_promisc_mode(hdev); |
4583 | hclge_sync_fd_table(hdev); |
4584 | |
4585 | if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { |
4586 | delta = jiffies - hdev->last_serv_processed; |
4587 | |
4588 | if (delta < round_jiffies_relative(HZ)) { |
4589 | delta = round_jiffies_relative(HZ) - delta; |
4590 | goto out; |
4591 | } |
4592 | } |
4593 | |
4594 | hdev->serv_processed_cnt++; |
4595 | hclge_update_vport_alive(hdev); |
4596 | |
4597 | if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { |
4598 | hdev->last_serv_processed = jiffies; |
4599 | goto out; |
4600 | } |
4601 | |
4602 | if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) |
4603 | hclge_update_stats_for_all(hdev); |
4604 | |
4605 | hclge_update_port_info(hdev); |
4606 | hclge_sync_vlan_filter(hdev); |
4607 | |
4608 | if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) |
4609 | hclge_rfs_filter_expire(hdev); |
4610 | |
4611 | hdev->last_serv_processed = jiffies; |
4612 | |
4613 | out: |
	hclge_task_schedule(hdev, delta);
4615 | } |
4616 | |
4617 | static void hclge_ptp_service_task(struct hclge_dev *hdev) |
4618 | { |
4619 | unsigned long flags; |
4620 | |
4621 | if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || |
4622 | !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || |
4623 | !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) |
4624 | return; |
4625 | |
4626 | /* to prevent concurrence with the irq handler */ |
4627 | spin_lock_irqsave(&hdev->ptp->lock, flags); |
4628 | |
4629 | /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq |
4630 | * handler may handle it just before spin_lock_irqsave(). |
4631 | */ |
4632 | if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) |
4633 | hclge_ptp_clean_tx_hwts(hdev); |
4634 | |
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4636 | } |
4637 | |
4638 | static void hclge_service_task(struct work_struct *work) |
4639 | { |
4640 | struct hclge_dev *hdev = |
4641 | container_of(work, struct hclge_dev, service_task.work); |
4642 | |
4643 | hclge_errhand_service_task(hdev); |
4644 | hclge_reset_service_task(hdev); |
4645 | hclge_ptp_service_task(hdev); |
4646 | hclge_mailbox_service_task(hdev); |
4647 | hclge_periodic_service_task(hdev); |
4648 | |
4649 | /* Handle error recovery, reset and mbx again in case periodical task |
4650 | * delays the handling by calling hclge_task_schedule() in |
4651 | * hclge_periodic_service_task(). |
4652 | */ |
4653 | hclge_errhand_service_task(hdev); |
4654 | hclge_reset_service_task(hdev); |
4655 | hclge_mailbox_service_task(hdev); |
4656 | } |
4657 | |
4658 | struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) |
4659 | { |
4660 | /* VF handle has no client */ |
4661 | if (!handle->client) |
4662 | return container_of(handle, struct hclge_vport, nic); |
4663 | else if (handle->client->type == HNAE3_CLIENT_ROCE) |
4664 | return container_of(handle, struct hclge_vport, roce); |
4665 | else |
4666 | return container_of(handle, struct hclge_vport, nic); |
4667 | } |
4668 | |
4669 | static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, |
4670 | struct hnae3_vector_info *vector_info) |
4671 | { |
4672 | #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 |
4673 | |
	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4675 | |
	/* an extended offset is needed to configure vectors >= 64 */
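	/* e.g. for idx = 66: idx - 1 = 65, so io_addr is the extended base
	 * plus (65 / 64) * HCLGE_VECTOR_REG_OFFSET_H
	 * plus (65 % 64) * HCLGE_VECTOR_REG_OFFSET
	 */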
4677 | if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) |
4678 | vector_info->io_addr = hdev->hw.hw.io_base + |
4679 | HCLGE_VECTOR_REG_BASE + |
4680 | (idx - 1) * HCLGE_VECTOR_REG_OFFSET; |
4681 | else |
4682 | vector_info->io_addr = hdev->hw.hw.io_base + |
4683 | HCLGE_VECTOR_EXT_REG_BASE + |
4684 | (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * |
4685 | HCLGE_VECTOR_REG_OFFSET_H + |
4686 | (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * |
4687 | HCLGE_VECTOR_REG_OFFSET; |
4688 | |
4689 | hdev->vector_status[idx] = hdev->vport[0].vport_id; |
4690 | hdev->vector_irq[idx] = vector_info->vector; |
4691 | } |
4692 | |
4693 | static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, |
4694 | struct hnae3_vector_info *vector_info) |
4695 | { |
4696 | struct hclge_vport *vport = hclge_get_vport(handle); |
4697 | struct hnae3_vector_info *vector = vector_info; |
4698 | struct hclge_dev *hdev = vport->back; |
4699 | int alloc = 0; |
4700 | u16 i = 0; |
4701 | u16 j; |
4702 | |
4703 | vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); |
4704 | vector_num = min(hdev->num_msi_left, vector_num); |
4705 | |
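	/* scan for unused entries; the pre-increment starts the search at
	 * index 1, because vector 0 is reserved (in this driver it is used
	 * for the misc interrupt)
	 */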
4706 | for (j = 0; j < vector_num; j++) { |
4707 | while (++i < hdev->num_nic_msi) { |
4708 | if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { |
				hclge_get_vector_info(hdev, i, vector);
4710 | vector++; |
4711 | alloc++; |
4712 | |
4713 | break; |
4714 | } |
4715 | } |
4716 | } |
4717 | hdev->num_msi_left -= alloc; |
4718 | hdev->num_msi_used += alloc; |
4719 | |
4720 | return alloc; |
4721 | } |
4722 | |
4723 | static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) |
4724 | { |
4725 | int i; |
4726 | |
4727 | for (i = 0; i < hdev->num_msi; i++) |
4728 | if (vector == hdev->vector_irq[i]) |
4729 | return i; |
4730 | |
4731 | return -EINVAL; |
4732 | } |
4733 | |
4734 | static int hclge_put_vector(struct hnae3_handle *handle, int vector) |
4735 | { |
4736 | struct hclge_vport *vport = hclge_get_vport(handle); |
4737 | struct hclge_dev *hdev = vport->back; |
4738 | int vector_id; |
4739 | |
4740 | vector_id = hclge_get_vector_index(hdev, vector); |
4741 | if (vector_id < 0) { |
4742 | dev_err(&hdev->pdev->dev, |
4743 | "Get vector index fail. vector = %d\n" , vector); |
4744 | return vector_id; |
4745 | } |
4746 | |
4747 | hclge_free_vector(hdev, vector_id); |
4748 | |
4749 | return 0; |
4750 | } |
4751 | |
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4758 | |
4759 | hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); |
4760 | |
	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     ae_dev->dev_specs.rss_ind_tbl_size);
4763 | |
4764 | return 0; |
4765 | } |
4766 | |
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4774 | int ret, i; |
4775 | |
	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4779 | return ret; |
4780 | } |
4781 | |
4782 | /* Update the shadow RSS table with user specified qids */ |
4783 | for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) |
4784 | rss_cfg->rss_indirection_tbl[i] = indir[i]; |
4785 | |
4786 | /* Update the hardware */ |
	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
4789 | } |
4790 | |
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4792 | struct ethtool_rxnfc *nfc) |
4793 | { |
4794 | struct hclge_vport *vport = hclge_get_vport(handle); |
4795 | struct hclge_dev *hdev = vport->back; |
4796 | int ret; |
4797 | |
	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
4803 | return ret; |
4804 | } |
4805 | |
4806 | return 0; |
4807 | } |
4808 | |
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4810 | struct ethtool_rxnfc *nfc) |
4811 | { |
4812 | struct hclge_vport *vport = hclge_get_vport(handle); |
4813 | u8 tuple_sets; |
4814 | int ret; |
4815 | |
4816 | nfc->data = 0; |
4817 | |
	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
				       &tuple_sets);
4820 | if (ret || !tuple_sets) |
4821 | return ret; |
4822 | |
4823 | nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); |
4824 | |
4825 | return 0; |
4826 | } |
4827 | |
4828 | static int hclge_get_tc_size(struct hnae3_handle *handle) |
4829 | { |
4830 | struct hclge_vport *vport = hclge_get_vport(handle); |
4831 | struct hclge_dev *hdev = vport->back; |
4832 | |
4833 | return hdev->pf_rss_size_max; |
4834 | } |
4835 | |
static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4837 | { |
4838 | struct hnae3_ae_dev *ae_dev = hdev->ae_dev; |
4839 | struct hclge_vport *vport = hdev->vport; |
4840 | u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; |
4841 | u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; |
4842 | u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; |
4843 | struct hnae3_tc_info *tc_info; |
4844 | u16 roundup_size; |
	u16 rss_size;
4846 | int i; |
4847 | |
4848 | tc_info = &vport->nic.kinfo.tc_info; |
4849 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
4850 | rss_size = tc_info->tqp_count[i]; |
4851 | tc_valid[i] = 0; |
4852 | |
4853 | if (!(hdev->hw_tc_map & BIT(i))) |
4854 | continue; |
4855 | |
		/* tc_size set to hardware is the log2 of the roundup power of
		 * two of rss_size; the actual queue size is limited by the
		 * indirection table.
		 */
4860 | if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || |
4861 | rss_size == 0) { |
4862 | dev_err(&hdev->pdev->dev, |
4863 | "Configure rss tc size failed, invalid TC_SIZE = %u\n" , |
4864 | rss_size); |
4865 | return -EINVAL; |
4866 | } |
4867 | |
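		/* e.g. rss_size = 24: roundup_pow_of_two() yields 32 and
		 * ilog2() yields 5, so tc_size 5 (32 queues) is what the
		 * hardware sees
		 */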
4868 | roundup_size = roundup_pow_of_two(rss_size); |
4869 | roundup_size = ilog2(roundup_size); |
4870 | |
4871 | tc_valid[i] = 1; |
4872 | tc_size[i] = roundup_size; |
4873 | tc_offset[i] = tc_info->tqp_offset[i]; |
4874 | } |
4875 | |
	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
4878 | } |
4879 | |
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
	u8 *key = hdev->rss_cfg.rss_hash_key;
	u8 hfunc = hdev->rss_cfg.rss_algo;
	int ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_indir);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4897 | if (ret) |
4898 | return ret; |
4899 | |
4900 | return hclge_init_rss_tc_mode(hdev); |
4901 | } |
4902 | |
4903 | int hclge_bind_ring_with_vector(struct hclge_vport *vport, |
4904 | int vector_id, bool en, |
4905 | struct hnae3_ring_chain_node *ring_chain) |
4906 | { |
4907 | struct hclge_dev *hdev = vport->back; |
4908 | struct hnae3_ring_chain_node *node; |
4909 | struct hclge_desc desc; |
4910 | struct hclge_ctrl_vector_chain_cmd *req = |
4911 | (struct hclge_ctrl_vector_chain_cmd *)desc.data; |
4912 | enum hclge_comm_cmd_status status; |
4913 | enum hclge_opcode_type op; |
4914 | u16 tqp_type_and_id; |
4915 | int i; |
4916 | |
4917 | op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; |
4918 | hclge_cmd_setup_basic_desc(&desc, op, false); |
4919 | req->int_vector_id_l = hnae3_get_field(vector_id, |
4920 | HCLGE_VECTOR_ID_L_M, |
4921 | HCLGE_VECTOR_ID_L_S); |
4922 | req->int_vector_id_h = hnae3_get_field(vector_id, |
4923 | HCLGE_VECTOR_ID_H_M, |
4924 | HCLGE_VECTOR_ID_H_S); |
4925 | |
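	/* each descriptor carries at most HCLGE_VECTOR_ELEMENTS_PER_CMD ring
	 * entries; once full it is sent and a fresh descriptor is set up
	 */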
4926 | i = 0; |
4927 | for (node = ring_chain; node; node = node->next) { |
4928 | tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); |
4929 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, |
4930 | HCLGE_INT_TYPE_S, |
4931 | hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); |
4932 | hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, |
4933 | HCLGE_TQP_ID_S, node->tqp_index); |
4934 | hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, |
4935 | HCLGE_INT_GL_IDX_S, |
4936 | hnae3_get_field(node->int_gl_idx, |
4937 | HNAE3_RING_GL_IDX_M, |
4938 | HNAE3_RING_GL_IDX_S)); |
4939 | req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); |
4940 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
4941 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; |
4942 | req->vfid = vport->vport_id; |
4943 | |
			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
4949 | return -EIO; |
4950 | } |
4951 | i = 0; |
4952 | |
			hclge_cmd_setup_basic_desc(&desc, op, false);
4956 | req->int_vector_id_l = |
4957 | hnae3_get_field(vector_id, |
4958 | HCLGE_VECTOR_ID_L_M, |
4959 | HCLGE_VECTOR_ID_L_S); |
4960 | req->int_vector_id_h = |
4961 | hnae3_get_field(vector_id, |
4962 | HCLGE_VECTOR_ID_H_M, |
4963 | HCLGE_VECTOR_ID_H_S); |
4964 | } |
4965 | } |
4966 | |
4967 | if (i > 0) { |
4968 | req->int_cause_num = i; |
4969 | req->vfid = vport->vport_id; |
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
4974 | return -EIO; |
4975 | } |
4976 | } |
4977 | |
4978 | return 0; |
4979 | } |
4980 | |
4981 | static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, |
4982 | struct hnae3_ring_chain_node *ring_chain) |
4983 | { |
4984 | struct hclge_vport *vport = hclge_get_vport(handle); |
4985 | struct hclge_dev *hdev = vport->back; |
4986 | int vector_id; |
4987 | |
4988 | vector_id = hclge_get_vector_index(hdev, vector); |
4989 | if (vector_id < 0) { |
4990 | dev_err(&hdev->pdev->dev, |
4991 | "failed to get vector index. vector=%d\n" , vector); |
4992 | return vector_id; |
4993 | } |
4994 | |
	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4996 | } |
4997 | |
4998 | static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, |
4999 | struct hnae3_ring_chain_node *ring_chain) |
5000 | { |
5001 | struct hclge_vport *vport = hclge_get_vport(handle); |
5002 | struct hclge_dev *hdev = vport->back; |
5003 | int vector_id, ret; |
5004 | |
5005 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
5006 | return 0; |
5007 | |
5008 | vector_id = hclge_get_vector_index(hdev, vector); |
5009 | if (vector_id < 0) { |
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
5012 | return vector_id; |
5013 | } |
5014 | |
	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret = %d\n",
			vector_id, ret);
5020 | |
5021 | return ret; |
5022 | } |
5023 | |
5024 | static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, |
5025 | bool en_uc, bool en_mc, bool en_bc) |
5026 | { |
5027 | struct hclge_vport *vport = &hdev->vport[vf_id]; |
5028 | struct hnae3_handle *handle = &vport->nic; |
5029 | struct hclge_promisc_cfg_cmd *req; |
5030 | struct hclge_desc desc; |
5031 | bool uc_tx_en = en_uc; |
5032 | u8 promisc_cfg = 0; |
5033 | int ret; |
5034 | |
5035 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); |
5036 | |
5037 | req = (struct hclge_promisc_cfg_cmd *)desc.data; |
5038 | req->vf_id = vf_id; |
5039 | |
5040 | if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) |
5041 | uc_tx_en = false; |
5042 | |
5043 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); |
5044 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); |
5045 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); |
5046 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); |
5047 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); |
5048 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); |
5049 | req->extend_promisc = promisc_cfg; |
5050 | |
5051 | /* to be compatible with DEVICE_VERSION_V1/2 */ |
5052 | promisc_cfg = 0; |
5053 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); |
5054 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); |
5055 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0); |
5056 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); |
5057 | hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); |
5058 | req->promisc = promisc_cfg; |
5059 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);
5065 | |
5066 | return ret; |
5067 | } |
5068 | |
5069 | int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, |
5070 | bool en_mc_pmc, bool en_bc_pmc) |
5071 | { |
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5074 | } |
5075 | |
5076 | static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, |
5077 | bool en_mc_pmc) |
5078 | { |
5079 | struct hclge_vport *vport = hclge_get_vport(handle); |
5080 | struct hclge_dev *hdev = vport->back; |
5081 | bool en_bc_pmc = true; |
5082 | |
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, vlan filter is always bypassed. So broadcast promisc
	 * should stay disabled until the user enables promisc mode.
	 */
5087 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
5088 | en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; |
5089 | |
5090 | return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, |
5091 | en_bc_pmc); |
5092 | } |
5093 | |
5094 | static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) |
5095 | { |
5096 | struct hclge_vport *vport = hclge_get_vport(handle); |
5097 | |
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5099 | } |
5100 | |
5101 | static void hclge_sync_fd_state(struct hclge_dev *hdev) |
5102 | { |
	if (hlist_empty(&hdev->fd_rule_list))
5104 | hdev->fd_active_type = HCLGE_FD_RULE_NONE; |
5105 | } |
5106 | |
5107 | static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) |
5108 | { |
5109 | if (!test_bit(location, hdev->fd_bmap)) { |
		set_bit(location, hdev->fd_bmap);
5111 | hdev->hclge_fd_rule_num++; |
5112 | } |
5113 | } |
5114 | |
5115 | static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) |
5116 | { |
5117 | if (test_bit(location, hdev->fd_bmap)) { |
		clear_bit(location, hdev->fd_bmap);
5119 | hdev->hclge_fd_rule_num--; |
5120 | } |
5121 | } |
5122 | |
5123 | static void hclge_fd_free_node(struct hclge_dev *hdev, |
5124 | struct hclge_fd_rule *rule) |
5125 | { |
	hlist_del(&rule->rule_node);
	kfree(rule);
5128 | hclge_sync_fd_state(hdev); |
5129 | } |
5130 | |
5131 | static void hclge_update_fd_rule_node(struct hclge_dev *hdev, |
5132 | struct hclge_fd_rule *old_rule, |
5133 | struct hclge_fd_rule *new_rule, |
5134 | enum HCLGE_FD_NODE_STATE state) |
5135 | { |
5136 | switch (state) { |
5137 | case HCLGE_FD_TO_ADD: |
5138 | case HCLGE_FD_ACTIVE: |
5139 | /* 1) if the new state is TO_ADD, just replace the old rule |
5140 | * with the same location, no matter its state, because the |
5141 | * new rule will be configured to the hardware. |
5142 | * 2) if the new state is ACTIVE, it means the new rule |
5143 | * has been configured to the hardware, so just replace |
5144 | * the old rule node with the same location. |
		 * 3) neither case adds a new node to the list, so there is
		 * no need to update the rule number or fd_bmap.
5147 | */ |
5148 | new_rule->rule_node.next = old_rule->rule_node.next; |
5149 | new_rule->rule_node.pprev = old_rule->rule_node.pprev; |
5150 | memcpy(old_rule, new_rule, sizeof(*old_rule)); |
		kfree(new_rule);
5152 | break; |
5153 | case HCLGE_FD_DELETED: |
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
5156 | break; |
5157 | case HCLGE_FD_TO_DEL: |
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) the state of the old rule is TO_DEL, nothing needs to be
		 * done, because rules are deleted by location and the other
		 * rule content is irrelevant.
		 * 2) the state of the old rule is ACTIVE, change its state to
		 * TO_DEL, so the rule will be deleted when the periodic task
		 * is scheduled.
		 * 3) the state of the old rule is TO_ADD, it means the rule
		 * hasn't been added to the hardware, so just delete the rule
		 * node from fd_rule_list directly.
		 */
5169 | if (old_rule->state == HCLGE_FD_TO_ADD) { |
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
5172 | return; |
5173 | } |
5174 | old_rule->state = HCLGE_FD_TO_DEL; |
5175 | break; |
5176 | } |
5177 | } |
5178 | |
5179 | static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, |
5180 | u16 location, |
5181 | struct hclge_fd_rule **parent) |
5182 | { |
5183 | struct hclge_fd_rule *rule; |
5184 | struct hlist_node *node; |
5185 | |
5186 | hlist_for_each_entry_safe(rule, node, hlist, rule_node) { |
5187 | if (rule->location == location) |
5188 | return rule; |
5189 | else if (rule->location > location) |
5190 | return NULL; |
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5193 | */ |
5194 | *parent = rule; |
5195 | } |
5196 | |
5197 | return NULL; |
5198 | } |
5199 | |
/* insert fd rule node in ascending order according to rule->location */
5201 | static void hclge_fd_insert_rule_node(struct hlist_head *hlist, |
5202 | struct hclge_fd_rule *rule, |
5203 | struct hclge_fd_rule *parent) |
5204 | { |
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
5211 | } |
5212 | |
5213 | static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, |
5214 | struct hclge_fd_user_def_cfg *cfg) |
5215 | { |
5216 | struct hclge_fd_user_def_cfg_cmd *req; |
5217 | struct hclge_desc desc; |
5218 | u16 data = 0; |
5219 | int ret; |
5220 | |
5221 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); |
5222 | |
5223 | req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; |
5224 | |
5225 | hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); |
5226 | hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, |
5227 | HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); |
5228 | req->ol2_cfg = cpu_to_le16(data); |
5229 | |
5230 | data = 0; |
5231 | hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); |
5232 | hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, |
5233 | HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); |
5234 | req->ol3_cfg = cpu_to_le16(data); |
5235 | |
5236 | data = 0; |
5237 | hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); |
5238 | hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, |
5239 | HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); |
5240 | req->ol4_cfg = cpu_to_le16(data); |
5241 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
5246 | return ret; |
5247 | } |
5248 | |
5249 | static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) |
5250 | { |
5251 | int ret; |
5252 | |
	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
5265 | } |
5266 | |
5267 | static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, |
5268 | struct hclge_fd_rule *rule) |
5269 | { |
5270 | struct hlist_head *hlist = &hdev->fd_rule_list; |
5271 | struct hclge_fd_rule *fd_rule, *parent = NULL; |
5272 | struct hclge_fd_user_def_info *info, *old_info; |
5273 | struct hclge_fd_user_def_cfg *cfg; |
5274 | |
5275 | if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || |
5276 | rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) |
5277 | return 0; |
5278 | |
	/* valid layer numbering starts from 1, so minus 1 to index the cfg */
5280 | cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; |
5281 | info = &rule->ep.user_def; |
5282 | |
5283 | if (!cfg->ref_cnt || cfg->offset == info->offset) |
5284 | return 0; |
5285 | |
5286 | if (cfg->ref_cnt > 1) |
5287 | goto error; |
5288 | |
	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5290 | if (fd_rule) { |
5291 | old_info = &fd_rule->ep.user_def; |
5292 | if (info->layer == old_info->layer) |
5293 | return 0; |
5294 | } |
5295 | |
5296 | error: |
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
5300 | return -ENOSPC; |
5301 | } |
5302 | |
5303 | static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, |
5304 | struct hclge_fd_rule *rule) |
5305 | { |
5306 | struct hclge_fd_user_def_cfg *cfg; |
5307 | |
5308 | if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || |
5309 | rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) |
5310 | return; |
5311 | |
5312 | cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; |
5313 | if (!cfg->ref_cnt) { |
5314 | cfg->offset = rule->ep.user_def.offset; |
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5316 | } |
5317 | cfg->ref_cnt++; |
5318 | } |
5319 | |
5320 | static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, |
5321 | struct hclge_fd_rule *rule) |
5322 | { |
5323 | struct hclge_fd_user_def_cfg *cfg; |
5324 | |
5325 | if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || |
5326 | rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) |
5327 | return; |
5328 | |
5329 | cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; |
5330 | if (!cfg->ref_cnt) |
5331 | return; |
5332 | |
5333 | cfg->ref_cnt--; |
5334 | if (!cfg->ref_cnt) { |
5335 | cfg->offset = 0; |
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5337 | } |
5338 | } |
5339 | |
5340 | static void hclge_update_fd_list(struct hclge_dev *hdev, |
5341 | enum HCLGE_FD_NODE_STATE state, u16 location, |
5342 | struct hclge_fd_rule *new_rule) |
5343 | { |
5344 | struct hlist_head *hlist = &hdev->fd_rule_list; |
5345 | struct hclge_fd_rule *fd_rule, *parent = NULL; |
5346 | |
	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5355 | return; |
5356 | } |
5357 | |
	/* it's unlikely to fail here, because we have checked that the rule
	 * exists before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
			 location);
5365 | return; |
5366 | } |
5367 | |
	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
5377 | } |
5378 | } |
5379 | |
5380 | static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) |
5381 | { |
5382 | struct hclge_get_fd_mode_cmd *req; |
5383 | struct hclge_desc desc; |
5384 | int ret; |
5385 | |
5386 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); |
5387 | |
5388 | req = (struct hclge_get_fd_mode_cmd *)desc.data; |
5389 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5393 | return ret; |
5394 | } |
5395 | |
5396 | *fd_mode = req->mode; |
5397 | |
5398 | return ret; |
5399 | } |
5400 | |
5401 | static int hclge_get_fd_allocation(struct hclge_dev *hdev, |
5402 | u32 *stage1_entry_num, |
5403 | u32 *stage2_entry_num, |
5404 | u16 *stage1_counter_num, |
5405 | u16 *stage2_counter_num) |
5406 | { |
5407 | struct hclge_get_fd_allocation_cmd *req; |
5408 | struct hclge_desc desc; |
5409 | int ret; |
5410 | |
5411 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); |
5412 | |
5413 | req = (struct hclge_get_fd_allocation_cmd *)desc.data; |
5414 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5418 | ret); |
5419 | return ret; |
5420 | } |
5421 | |
5422 | *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); |
5423 | *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); |
5424 | *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); |
5425 | *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); |
5426 | |
5427 | return ret; |
5428 | } |
5429 | |
5430 | static int hclge_set_fd_key_config(struct hclge_dev *hdev, |
5431 | enum HCLGE_FD_STAGE stage_num) |
5432 | { |
5433 | struct hclge_set_fd_key_config_cmd *req; |
5434 | struct hclge_fd_key_cfg *stage; |
5435 | struct hclge_desc desc; |
5436 | int ret; |
5437 | |
5438 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); |
5439 | |
5440 | req = (struct hclge_set_fd_key_config_cmd *)desc.data; |
5441 | stage = &hdev->fd_cfg.key_cfg[stage_num]; |
5442 | req->stage = stage_num; |
5443 | req->key_select = stage->key_sel; |
5444 | req->inner_sipv6_word_en = stage->inner_sipv6_word_en; |
5445 | req->inner_dipv6_word_en = stage->inner_dipv6_word_en; |
5446 | req->outer_sipv6_word_en = stage->outer_sipv6_word_en; |
5447 | req->outer_dipv6_word_en = stage->outer_dipv6_word_en; |
5448 | req->tuple_mask = cpu_to_le32(~stage->tuple_active); |
5449 | req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); |
5450 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5454 | |
5455 | return ret; |
5456 | } |
5457 | |
5458 | static void hclge_fd_disable_user_def(struct hclge_dev *hdev) |
5459 | { |
5460 | struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; |
5461 | |
	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);
5465 | |
5466 | hclge_fd_set_user_def_cmd(hdev, cfg); |
5467 | } |
5468 | |
5469 | static int hclge_init_fd_config(struct hclge_dev *hdev) |
5470 | { |
5471 | #define LOW_2_WORDS 0x03 |
5472 | struct hclge_fd_key_cfg *key_cfg; |
5473 | int ret; |
5474 | |
5475 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
5476 | return 0; |
5477 | |
	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5479 | if (ret) |
5480 | return ret; |
5481 | |
5482 | switch (hdev->fd_cfg.fd_mode) { |
5483 | case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: |
5484 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; |
5485 | break; |
5486 | case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: |
5487 | hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; |
5488 | break; |
5489 | default: |
5490 | dev_err(&hdev->pdev->dev, |
5491 | "Unsupported flow director mode %u\n" , |
5492 | hdev->fd_cfg.fd_mode); |
5493 | return -EOPNOTSUPP; |
5494 | } |
5495 | |
5496 | key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; |
5497 | key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; |
5498 | key_cfg->inner_sipv6_word_en = LOW_2_WORDS; |
5499 | key_cfg->inner_dipv6_word_en = LOW_2_WORDS; |
5500 | key_cfg->outer_sipv6_word_en = 0; |
5501 | key_cfg->outer_dipv6_word_en = 0; |
5502 | |
5503 | key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | |
5504 | BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | |
5505 | BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | |
5506 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); |
5507 | |
	/* If the max 400-bit key is used, tuples for ether type are also supported */
5509 | if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { |
5510 | key_cfg->tuple_active |= |
5511 | BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); |
5512 | if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) |
5513 | key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; |
5514 | } |
5515 | |
5516 | /* roce_type is used to filter roce frames |
5517 | * dst_vport is used to specify the rule |
5518 | */ |
5519 | key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); |
5520 | |
	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5526 | if (ret) |
5527 | return ret; |
5528 | |
	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5530 | } |
5531 | |
5532 | static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, |
5533 | int loc, u8 *key, bool is_add) |
5534 | { |
5535 | struct hclge_fd_tcam_config_1_cmd *req1; |
5536 | struct hclge_fd_tcam_config_2_cmd *req2; |
5537 | struct hclge_fd_tcam_config_3_cmd *req3; |
5538 | struct hclge_desc desc[3]; |
5539 | int ret; |
5540 | |
5541 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); |
5542 | desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
5543 | hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); |
5544 | desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
5545 | hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); |
5546 | |
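	/* one TCAM entry spans three command descriptors, chained together by
	 * the NEXT flag on the first two
	 */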
5547 | req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; |
5548 | req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; |
5549 | req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; |
5550 | |
5551 | req1->stage = stage; |
5552 | req1->xy_sel = sel_x ? 1 : 0; |
5553 | hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); |
5554 | req1->index = cpu_to_le32(loc); |
5555 | req1->entry_vld = sel_x ? is_add : 0; |
5556 | |
5557 | if (key) { |
5558 | memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); |
5559 | memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], |
5560 | sizeof(req2->tcam_data)); |
5561 | memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + |
5562 | sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); |
5563 | } |
5564 | |
	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);
5570 | |
5571 | return ret; |
5572 | } |
5573 | |
5574 | static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, |
5575 | struct hclge_fd_ad_data *action) |
5576 | { |
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5578 | struct hclge_fd_ad_config_cmd *req; |
5579 | struct hclge_desc desc; |
5580 | u64 ad_data = 0; |
5581 | int ret; |
5582 | |
5583 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); |
5584 | |
5585 | req = (struct hclge_fd_ad_config_cmd *)desc.data; |
5586 | req->index = cpu_to_le32(loc); |
5587 | req->stage = stage; |
5588 | |
5589 | hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, |
5590 | action->write_rule_id_to_bd); |
5591 | hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, |
5592 | action->rule_id); |
5593 | if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { |
5594 | hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, |
5595 | action->override_tc); |
5596 | hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, |
5597 | HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); |
5598 | } |
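	/* the fields above occupy the high 32 bits of ad_data: shift them up,
	 * then fill the low 32 bits with the queue/counter fields
	 */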
5599 | ad_data <<= 32; |
5600 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); |
5601 | hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, |
5602 | action->forward_to_direct_queue); |
5603 | hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, |
5604 | action->queue_id); |
5605 | hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); |
5606 | hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, |
5607 | HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); |
5608 | hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); |
5609 | hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, |
5610 | action->counter_id); |
5611 | |
5612 | req->ad_data = cpu_to_le64(ad_data); |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5616 | |
5617 | return ret; |
5618 | } |
5619 | |
5620 | static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, |
5621 | struct hclge_fd_rule *rule) |
5622 | { |
5623 | int offset, moffset, ip_offset; |
5624 | enum HCLGE_FD_KEY_OPT key_opt; |
5625 | u16 tmp_x_s, tmp_y_s; |
5626 | u32 tmp_x_l, tmp_y_l; |
5627 | u8 *p = (u8 *)rule; |
5628 | int i; |
5629 | |
5630 | if (rule->unused_tuple & BIT(tuple_bit)) |
5631 | return true; |
5632 | |
5633 | key_opt = tuple_key_info[tuple_bit].key_opt; |
5634 | offset = tuple_key_info[tuple_bit].offset; |
5635 | moffset = tuple_key_info[tuple_bit].moffset; |
5636 | |
5637 | switch (key_opt) { |
5638 | case KEY_OPT_U8: |
5639 | calc_x(*key_x, p[offset], p[moffset]); |
5640 | calc_y(*key_y, p[offset], p[moffset]); |
5641 | |
5642 | return true; |
5643 | case KEY_OPT_LE16: |
5644 | calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); |
5645 | calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); |
5646 | *(__le16 *)key_x = cpu_to_le16(tmp_x_s); |
5647 | *(__le16 *)key_y = cpu_to_le16(tmp_y_s); |
5648 | |
5649 | return true; |
5650 | case KEY_OPT_LE32: |
5651 | calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); |
5652 | calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); |
5653 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); |
5654 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); |
5655 | |
5656 | return true; |
5657 | case KEY_OPT_MAC: |
5658 | for (i = 0; i < ETH_ALEN; i++) { |
5659 | calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], |
5660 | p[moffset + i]); |
5661 | calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], |
5662 | p[moffset + i]); |
5663 | } |
5664 | |
5665 | return true; |
5666 | case KEY_OPT_IP: |
5667 | ip_offset = IPV4_INDEX * sizeof(u32); |
5668 | calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), |
5669 | *(u32 *)(&p[moffset + ip_offset])); |
5670 | calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), |
5671 | *(u32 *)(&p[moffset + ip_offset])); |
5672 | *(__le32 *)key_x = cpu_to_le32(tmp_x_l); |
5673 | *(__le32 *)key_y = cpu_to_le32(tmp_y_l); |
5674 | |
5675 | return true; |
5676 | default: |
5677 | return false; |
5678 | } |
5679 | } |
5680 | |
5681 | static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, |
5682 | u8 vf_id, u8 network_port_id) |
5683 | { |
5684 | u32 port_number = 0; |
5685 | |
5686 | if (port_type == HOST_PORT) { |
5687 | hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, |
5688 | pf_id); |
5689 | hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, |
5690 | vf_id); |
5691 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); |
5692 | } else { |
5693 | hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, |
5694 | HCLGE_NETWORK_PORT_ID_S, network_port_id); |
5695 | hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); |
5696 | } |
5697 | |
5698 | return port_number; |
5699 | } |
5700 | |
5701 | static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, |
5702 | __le32 *key_x, __le32 *key_y, |
5703 | struct hclge_fd_rule *rule) |
5704 | { |
5705 | u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; |
5706 | u8 cur_pos = 0, tuple_size, shift_bits; |
5707 | unsigned int i; |
5708 | |
5709 | for (i = 0; i < MAX_META_DATA; i++) { |
5710 | tuple_size = meta_data_key_info[i].key_length; |
5711 | tuple_bit = key_cfg->meta_data_active & BIT(i); |
5712 | |
5713 | switch (tuple_bit) { |
5714 | case BIT(ROCE_TYPE): |
5715 | hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); |
5716 | cur_pos += tuple_size; |
5717 | break; |
5718 | case BIT(DST_VPORT): |
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
5721 | hnae3_set_field(meta_data, |
5722 | GENMASK(cur_pos + tuple_size, cur_pos), |
5723 | cur_pos, port_number); |
5724 | cur_pos += tuple_size; |
5725 | break; |
5726 | default: |
5727 | break; |
5728 | } |
5729 | } |
5730 | |
5731 | calc_x(tmp_x, meta_data, 0xFFFFFFFF); |
5732 | calc_y(tmp_y, meta_data, 0xFFFFFFFF); |
5733 | shift_bits = sizeof(meta_data) * 8 - cur_pos; |
5734 | |
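	/* left-justify: the meta data occupies the most significant cur_pos
	 * bits of the 32-bit meta data word
	 */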
5735 | *key_x = cpu_to_le32(tmp_x << shift_bits); |
5736 | *key_y = cpu_to_le32(tmp_y << shift_bits); |
5737 | } |
5738 | |
/* A complete key is combined from the meta data key and the tuple key.
 * The meta data key is stored at the MSB region, the tuple key at the LSB
 * region, and unused bits are filled with 0.
 */
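/* Each key is programmed twice, as an x/y pair (see the two
 * hclge_fd_tcam_config() calls in hclge_config_key() below): the TCAM
 * encodes every masked tuple as the key_x/key_y values produced by
 * calc_x()/calc_y().
 */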
5743 | static int hclge_config_key(struct hclge_dev *hdev, u8 stage, |
5744 | struct hclge_fd_rule *rule) |
5745 | { |
5746 | struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; |
5747 | u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; |
5748 | u8 *cur_key_x, *cur_key_y; |
5749 | u8 meta_data_region; |
5750 | u8 tuple_size; |
5751 | int ret; |
5752 | u32 i; |
5753 | |
5754 | memset(key_x, 0, sizeof(key_x)); |
5755 | memset(key_y, 0, sizeof(key_y)); |
5756 | cur_key_x = key_x; |
5757 | cur_key_y = key_y; |
5758 | |
5759 | for (i = 0; i < MAX_TUPLE; i++) { |
5760 | bool tuple_valid; |
5761 | |
5762 | tuple_size = tuple_key_info[i].key_length / 8; |
5763 | if (!(key_cfg->tuple_active & BIT(i))) |
5764 | continue; |
5765 | |
		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
5768 | if (tuple_valid) { |
5769 | cur_key_x += tuple_size; |
5770 | cur_key_y += tuple_size; |
5771 | } |
5772 | } |
5773 | |
5774 | meta_data_region = hdev->fd_cfg.max_key_length / 8 - |
5775 | MAX_META_DATA_LENGTH / 8; |
5776 | |
	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);
5781 | |
	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
	return ret;
5798 | } |
5799 | |
5800 | static int hclge_config_action(struct hclge_dev *hdev, u8 stage, |
5801 | struct hclge_fd_rule *rule) |
5802 | { |
5803 | struct hclge_vport *vport = hdev->vport; |
5804 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; |
5805 | struct hclge_fd_ad_data ad_data; |
5806 | |
5807 | memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); |
5808 | ad_data.ad_id = rule->location; |
5809 | |
5810 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { |
5811 | ad_data.drop_packet = true; |
5812 | } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { |
5813 | ad_data.override_tc = true; |
5814 | ad_data.queue_id = |
5815 | kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; |
5816 | ad_data.tc_size = |
5817 | ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); |
5818 | } else { |
5819 | ad_data.forward_to_direct_queue = true; |
5820 | ad_data.queue_id = rule->queue_id; |
5821 | } |
5822 | |
5823 | if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { |
5824 | ad_data.use_counter = true; |
5825 | ad_data.counter_id = rule->vf_id % |
5826 | hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; |
5827 | } else { |
5828 | ad_data.use_counter = false; |
5829 | ad_data.counter_id = 0; |
5830 | } |
5831 | |
5832 | ad_data.use_next_stage = false; |
5833 | ad_data.next_input_key = 0; |
5834 | |
5835 | ad_data.write_rule_id_to_bd = true; |
5836 | ad_data.rule_id = rule->location; |
5837 | |
	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5839 | } |
5840 | |
5841 | static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, |
5842 | u32 *unused_tuple) |
5843 | { |
5844 | if (!spec || !unused_tuple) |
5845 | return -EINVAL; |
5846 | |
5847 | *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); |
5848 | |
5849 | if (!spec->ip4src) |
5850 | *unused_tuple |= BIT(INNER_SRC_IP); |
5851 | |
5852 | if (!spec->ip4dst) |
5853 | *unused_tuple |= BIT(INNER_DST_IP); |
5854 | |
5855 | if (!spec->psrc) |
5856 | *unused_tuple |= BIT(INNER_SRC_PORT); |
5857 | |
5858 | if (!spec->pdst) |
5859 | *unused_tuple |= BIT(INNER_DST_PORT); |
5860 | |
5861 | if (!spec->tos) |
5862 | *unused_tuple |= BIT(INNER_IP_TOS); |
5863 | |
5864 | return 0; |
5865 | } |
5866 | |
5867 | static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, |
5868 | u32 *unused_tuple) |
5869 | { |
5870 | if (!spec || !unused_tuple) |
5871 | return -EINVAL; |
5872 | |
5873 | *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | |
5874 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); |
5875 | |
5876 | if (!spec->ip4src) |
5877 | *unused_tuple |= BIT(INNER_SRC_IP); |
5878 | |
5879 | if (!spec->ip4dst) |
5880 | *unused_tuple |= BIT(INNER_DST_IP); |
5881 | |
5882 | if (!spec->tos) |
5883 | *unused_tuple |= BIT(INNER_IP_TOS); |
5884 | |
5885 | if (!spec->proto) |
5886 | *unused_tuple |= BIT(INNER_IP_PROTO); |
5887 | |
5888 | if (spec->l4_4_bytes) |
5889 | return -EOPNOTSUPP; |
5890 | |
5891 | if (spec->ip_ver != ETH_RX_NFC_IP4) |
5892 | return -EOPNOTSUPP; |
5893 | |
5894 | return 0; |
5895 | } |
5896 | |
5897 | static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, |
5898 | u32 *unused_tuple) |
5899 | { |
5900 | if (!spec || !unused_tuple) |
5901 | return -EINVAL; |
5902 | |
5903 | *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); |
5904 | |
5905 | /* check whether src/dst ip address used */ |
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5907 | *unused_tuple |= BIT(INNER_SRC_IP); |
5908 | |
	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5910 | *unused_tuple |= BIT(INNER_DST_IP); |
5911 | |
5912 | if (!spec->psrc) |
5913 | *unused_tuple |= BIT(INNER_SRC_PORT); |
5914 | |
5915 | if (!spec->pdst) |
5916 | *unused_tuple |= BIT(INNER_DST_PORT); |
5917 | |
5918 | if (!spec->tclass) |
5919 | *unused_tuple |= BIT(INNER_IP_TOS); |
5920 | |
5921 | return 0; |
5922 | } |
5923 | |
5924 | static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, |
5925 | u32 *unused_tuple) |
5926 | { |
5927 | if (!spec || !unused_tuple) |
5928 | return -EINVAL; |
5929 | |
5930 | *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | |
5931 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); |
5932 | |
5933 | /* check whether src/dst ip address used */ |
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5935 | *unused_tuple |= BIT(INNER_SRC_IP); |
5936 | |
	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5938 | *unused_tuple |= BIT(INNER_DST_IP); |
5939 | |
5940 | if (!spec->l4_proto) |
5941 | *unused_tuple |= BIT(INNER_IP_PROTO); |
5942 | |
5943 | if (!spec->tclass) |
5944 | *unused_tuple |= BIT(INNER_IP_TOS); |
5945 | |
5946 | if (spec->l4_4_bytes) |
5947 | return -EOPNOTSUPP; |
5948 | |
5949 | return 0; |
5950 | } |
5951 | |
5952 | static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) |
5953 | { |
5954 | if (!spec || !unused_tuple) |
5955 | return -EINVAL; |
5956 | |
5957 | *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | |
5958 | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | |
5959 | BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); |
5960 | |
	if (is_zero_ether_addr(spec->h_source))
5962 | *unused_tuple |= BIT(INNER_SRC_MAC); |
5963 | |
	if (is_zero_ether_addr(spec->h_dest))
5965 | *unused_tuple |= BIT(INNER_DST_MAC); |
5966 | |
5967 | if (!spec->h_proto) |
5968 | *unused_tuple |= BIT(INNER_ETH_TYPE); |
5969 | |
5970 | return 0; |
5971 | } |
5972 | |
5973 | static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, |
5974 | struct ethtool_rx_flow_spec *fs, |
5975 | u32 *unused_tuple) |
5976 | { |
5977 | if (fs->flow_type & FLOW_EXT) { |
5978 | if (fs->h_ext.vlan_etype) { |
5979 | dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n" ); |
5980 | return -EOPNOTSUPP; |
5981 | } |
5982 | |
5983 | if (!fs->h_ext.vlan_tci) |
5984 | *unused_tuple |= BIT(INNER_VLAN_TAG_FST); |
5985 | |
5986 | if (fs->m_ext.vlan_tci && |
5987 | be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { |
5988 | dev_err(&hdev->pdev->dev, |
5989 | "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n" , |
5990 | ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); |
5991 | return -EINVAL; |
5992 | } |
5993 | } else { |
5994 | *unused_tuple |= BIT(INNER_VLAN_TAG_FST); |
5995 | } |
5996 | |
5997 | if (fs->flow_type & FLOW_MAC_EXT) { |
5998 | if (hdev->fd_cfg.fd_mode != |
5999 | HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { |
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
6006 | *unused_tuple |= BIT(INNER_DST_MAC); |
6007 | else |
6008 | *unused_tuple &= ~BIT(INNER_DST_MAC); |
6009 | } |
6010 | |
6011 | return 0; |
6012 | } |
6013 | |
6014 | static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, |
6015 | struct hclge_fd_user_def_info *info) |
6016 | { |
6017 | switch (flow_type) { |
6018 | case ETHER_FLOW: |
6019 | info->layer = HCLGE_FD_USER_DEF_L2; |
6020 | *unused_tuple &= ~BIT(INNER_L2_RSV); |
6021 | break; |
6022 | case IP_USER_FLOW: |
6023 | case IPV6_USER_FLOW: |
6024 | info->layer = HCLGE_FD_USER_DEF_L3; |
6025 | *unused_tuple &= ~BIT(INNER_L3_RSV); |
6026 | break; |
6027 | case TCP_V4_FLOW: |
6028 | case UDP_V4_FLOW: |
6029 | case TCP_V6_FLOW: |
6030 | case UDP_V6_FLOW: |
6031 | info->layer = HCLGE_FD_USER_DEF_L4; |
6032 | *unused_tuple &= ~BIT(INNER_L4_RSV); |
6033 | break; |
6034 | default: |
6035 | return -EOPNOTSUPP; |
6036 | } |
6037 | |
6038 | return 0; |
6039 | } |
6040 | |
6041 | static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) |
6042 | { |
6043 | return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; |
6044 | } |
6045 | |
6046 | static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, |
6047 | struct ethtool_rx_flow_spec *fs, |
6048 | u32 *unused_tuple, |
6049 | struct hclge_fd_user_def_info *info) |
6050 | { |
6051 | u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; |
6052 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); |
6053 | u16 data, offset, data_mask, offset_mask; |
6054 | int ret; |
6055 | |
6056 | info->layer = HCLGE_FD_USER_DEF_NONE; |
6057 | *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; |
6058 | |
6059 | if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) |
6060 | return 0; |
6061 | |
	/* the user-def data from ethtool is a 64-bit value: bits 0~15 hold
	 * the match data and bits 32~47 hold the offset.
	 */
6065 | data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; |
6066 | data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; |
6067 | offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; |
6068 | offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; |
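	/* e.g. "... user-def 0x120000abcd" from ethtool selects offset 0x12
	 * (low 16 bits of h_ext.data[0], the high word) and match data
	 * 0xabcd (low 16 bits of h_ext.data[1], the low word)
	 */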
6069 | |
6070 | if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { |
6071 | dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n" ); |
6072 | return -EOPNOTSUPP; |
6073 | } |
6074 | |
6075 | if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { |
		dev_err(&hdev->pdev->dev,
			"user-def offset[%u] should be no more than %u\n",
			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6079 | return -EINVAL; |
6080 | } |
6081 | |
6082 | if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { |
6083 | dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n" ); |
6084 | return -EINVAL; |
6085 | } |
6086 | |
6087 | ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); |
6088 | if (ret) { |
		dev_err(&hdev->pdev->dev,
			"unsupported flow type for user-def bytes, ret = %d\n",
			ret);
6092 | return ret; |
6093 | } |
6094 | |
6095 | info->data = data; |
6096 | info->data_mask = data_mask; |
6097 | info->offset = offset; |
6098 | |
6099 | return 0; |
6100 | } |
6101 | |
6102 | static int hclge_fd_check_spec(struct hclge_dev *hdev, |
6103 | struct ethtool_rx_flow_spec *fs, |
6104 | u32 *unused_tuple, |
6105 | struct hclge_fd_user_def_info *info) |
6106 | { |
6107 | u32 flow_type; |
6108 | int ret; |
6109 | |
6110 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { |
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6115 | return -EINVAL; |
6116 | } |
6117 | |
6118 | ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); |
6119 | if (ret) |
6120 | return ret; |
6121 | |
6122 | flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); |
6123 | switch (flow_type) { |
6124 | case SCTP_V4_FLOW: |
6125 | case TCP_V4_FLOW: |
6126 | case UDP_V4_FLOW: |
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
6129 | break; |
6130 | case IP_USER_FLOW: |
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
6133 | break; |
6134 | case SCTP_V6_FLOW: |
6135 | case TCP_V6_FLOW: |
6136 | case UDP_V6_FLOW: |
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
6139 | break; |
6140 | case IPV6_USER_FLOW: |
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
6143 | break; |
6144 | case ETHER_FLOW: |
6145 | if (hdev->fd_cfg.fd_mode != |
6146 | HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { |
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
6149 | return -EOPNOTSUPP; |
6150 | } |
6151 | |
		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
6154 | break; |
6155 | default: |
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
6159 | return -EOPNOTSUPP; |
6160 | } |
6161 | |
6162 | if (ret) { |
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
6166 | return ret; |
6167 | } |
6168 | |
6169 | return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); |
6170 | } |
6171 | |
6172 | static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs, |
6173 | struct hclge_fd_rule *rule, u8 ip_proto) |
6174 | { |
6175 | rule->tuples.src_ip[IPV4_INDEX] = |
6176 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); |
6177 | rule->tuples_mask.src_ip[IPV4_INDEX] = |
6178 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); |
6179 | |
6180 | rule->tuples.dst_ip[IPV4_INDEX] = |
6181 | be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); |
6182 | rule->tuples_mask.dst_ip[IPV4_INDEX] = |
6183 | be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); |
6184 | |
6185 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); |
6186 | rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); |
6187 | |
6188 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); |
6189 | rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); |
6190 | |
6191 | rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; |
6192 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; |
6193 | |
6194 | rule->tuples.ether_proto = ETH_P_IP; |
6195 | rule->tuples_mask.ether_proto = 0xFFFF; |
6196 | |
6197 | rule->tuples.ip_proto = ip_proto; |
6198 | rule->tuples_mask.ip_proto = 0xFF; |
6199 | } |
6200 | |
6201 | static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, |
6202 | struct hclge_fd_rule *rule) |
6203 | { |
6204 | rule->tuples.src_ip[IPV4_INDEX] = |
6205 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); |
6206 | rule->tuples_mask.src_ip[IPV4_INDEX] = |
6207 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); |
6208 | |
6209 | rule->tuples.dst_ip[IPV4_INDEX] = |
6210 | be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); |
6211 | rule->tuples_mask.dst_ip[IPV4_INDEX] = |
6212 | be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); |
6213 | |
6214 | rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; |
6215 | rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; |
6216 | |
6217 | rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; |
6218 | rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; |
6219 | |
6220 | rule->tuples.ether_proto = ETH_P_IP; |
6221 | rule->tuples_mask.ether_proto = 0xFFFF; |
6222 | } |
6223 | |
6224 | static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, |
6225 | struct hclge_fd_rule *rule, u8 ip_proto) |
6226 | { |
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);
6236 | |
6237 | rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); |
6238 | rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); |
6239 | |
6240 | rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); |
6241 | rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); |
6242 | |
6243 | rule->tuples.ether_proto = ETH_P_IPV6; |
6244 | rule->tuples_mask.ether_proto = 0xFFFF; |
6245 | |
6246 | rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; |
6247 | rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; |
6248 | |
6249 | rule->tuples.ip_proto = ip_proto; |
6250 | rule->tuples_mask.ip_proto = 0xFF; |
6251 | } |
6252 | |
6253 | static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, |
6254 | struct hclge_fd_rule *rule) |
6255 | { |
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6271 | |
6272 | rule->tuples.ether_proto = ETH_P_IPV6; |
6273 | rule->tuples_mask.ether_proto = 0xFFFF; |
6274 | } |
6275 | |
6276 | static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs, |
6277 | struct hclge_fd_rule *rule) |
6278 | { |
	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);

	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6284 | |
6285 | rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); |
6286 | rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); |
6287 | } |
6288 | |
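/* Copy the user-defined data and mask into the tuple matching the
 * configured layer. For L4, the 16-bit data and mask are placed in the
 * upper half of the 32-bit l4_user_def tuple.
 */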
6289 | static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, |
6290 | struct hclge_fd_rule *rule) |
6291 | { |
6292 | switch (info->layer) { |
6293 | case HCLGE_FD_USER_DEF_L2: |
6294 | rule->tuples.l2_user_def = info->data; |
6295 | rule->tuples_mask.l2_user_def = info->data_mask; |
6296 | break; |
6297 | case HCLGE_FD_USER_DEF_L3: |
6298 | rule->tuples.l3_user_def = info->data; |
6299 | rule->tuples_mask.l3_user_def = info->data_mask; |
6300 | break; |
6301 | case HCLGE_FD_USER_DEF_L4: |
6302 | rule->tuples.l4_user_def = (u32)info->data << 16; |
6303 | rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; |
6304 | break; |
6305 | default: |
6306 | break; |
6307 | } |
6308 | |
6309 | rule->ep.user_def = *info; |
6310 | } |
6311 | |
6312 | static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, |
6313 | struct hclge_fd_rule *rule, |
6314 | struct hclge_fd_user_def_info *info) |
6315 | { |
6316 | u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); |
6317 | |
6318 | switch (flow_type) { |
6319 | case SCTP_V4_FLOW: |
6320 | hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP); |
6321 | break; |
6322 | case TCP_V4_FLOW: |
6323 | hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP); |
6324 | break; |
6325 | case UDP_V4_FLOW: |
6326 | hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP); |
6327 | break; |
6328 | case IP_USER_FLOW: |
6329 | hclge_fd_get_ip4_tuple(fs, rule); |
6330 | break; |
6331 | case SCTP_V6_FLOW: |
6332 | hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); |
6333 | break; |
6334 | case TCP_V6_FLOW: |
6335 | hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP); |
6336 | break; |
6337 | case UDP_V6_FLOW: |
6338 | hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP); |
6339 | break; |
6340 | case IPV6_USER_FLOW: |
6341 | hclge_fd_get_ip6_tuple(fs, rule); |
6342 | break; |
6343 | case ETHER_FLOW: |
6344 | hclge_fd_get_ether_tuple(fs, rule); |
6345 | break; |
6346 | default: |
6347 | return -EOPNOTSUPP; |
6348 | } |
6349 | |
6350 | if (fs->flow_type & FLOW_EXT) { |
6351 | rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); |
6352 | rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); |
6353 | hclge_fd_get_user_def_tuple(info, rule); |
6354 | } |
6355 | |
6356 | if (fs->flow_type & FLOW_MAC_EXT) { |
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6359 | } |
6360 | |
6361 | return 0; |
6362 | } |
6363 | |
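/* Write a rule to hardware: configure the action first, then the TCAM key. */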
6364 | static int hclge_fd_config_rule(struct hclge_dev *hdev, |
6365 | struct hclge_fd_rule *rule) |
6366 | { |
6367 | int ret; |
6368 | |
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6374 | } |
6375 | |
6376 | static int hclge_add_fd_entry_common(struct hclge_dev *hdev, |
6377 | struct hclge_fd_rule *rule) |
6378 | { |
6379 | int ret; |
6380 | |
	spin_lock_bh(&hdev->fd_rule_lock);

	if (hdev->fd_active_type != rule->rule_type &&
	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
		dev_err(&hdev->pdev->dev,
			"mode conflict(new type %d, active type %d), please delete existing rules first\n",
			rule->rule_type, hdev->fd_active_type);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
	if (ret)
		goto out;

	ret = hclge_clear_arfs_rules(hdev);
	if (ret)
		goto out;

	ret = hclge_fd_config_rule(hdev, rule);
	if (ret)
		goto out;

	rule->state = HCLGE_FD_ACTIVE;
	hdev->fd_active_type = rule->rule_type;
	hclge_update_fd_list(hdev, rule->state, rule->location, rule);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
6411 | return ret; |
6412 | } |
6413 | |
6414 | static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) |
6415 | { |
6416 | struct hclge_vport *vport = hclge_get_vport(handle); |
6417 | struct hclge_dev *hdev = vport->back; |
6418 | |
6419 | return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; |
6420 | } |
6421 | |
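/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop action;
 * otherwise the low bits select the queue and the VF field selects the
 * target vport (0 means the PF itself).
 */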
6422 | static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, |
6423 | u16 *vport_id, u8 *action, u16 *queue_id) |
6424 | { |
6425 | struct hclge_vport *vport = hdev->vport; |
6426 | |
6427 | if (ring_cookie == RX_CLS_FLOW_DISC) { |
6428 | *action = HCLGE_FD_ACTION_DROP_PACKET; |
6429 | } else { |
6430 | u32 ring = ethtool_get_flow_spec_ring(ring_cookie); |
6431 | u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); |
6432 | u16 tqps; |
6433 | |
		/* To stay consistent with the user's configuration, print
		 * 'vf' minus 1: the vf id from ethtool is the user's vf id
		 * plus 1.
		 */
		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) should be less than %u\n",
				vf - 1U, hdev->num_req_vfs);
			return -EINVAL;
		}
6443 | |
6444 | *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; |
6445 | tqps = hdev->vport[vf].nic.kinfo.num_tqps; |
6446 | |
6447 | if (ring >= tqps) { |
6448 | dev_err(&hdev->pdev->dev, |
6449 | "Error: queue id (%u) > max tqp num (%u)\n" , |
6450 | ring, tqps - 1U); |
6451 | return -EINVAL; |
6452 | } |
6453 | |
6454 | *action = HCLGE_FD_ACTION_SELECT_QUEUE; |
6455 | *queue_id = ring; |
6456 | } |
6457 | |
6458 | return 0; |
6459 | } |
6460 | |
6461 | static int hclge_add_fd_entry(struct hnae3_handle *handle, |
6462 | struct ethtool_rxnfc *cmd) |
6463 | { |
6464 | struct hclge_vport *vport = hclge_get_vport(handle); |
6465 | struct hclge_dev *hdev = vport->back; |
6466 | struct hclge_fd_user_def_info info; |
6467 | u16 dst_vport_id = 0, q_index = 0; |
6468 | struct ethtool_rx_flow_spec *fs; |
6469 | struct hclge_fd_rule *rule; |
6470 | u32 unused = 0; |
6471 | u8 action; |
6472 | int ret; |
6473 | |
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}
6485 | |
6486 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; |
6487 | |
	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
	if (ret)
		return ret;

	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
					 &action, &q_index);
	if (ret)
		return ret;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(fs, rule, &info);
	if (ret) {
		kfree(rule);
		return ret;
	}
6506 | |
6507 | rule->flow_type = fs->flow_type; |
6508 | rule->location = fs->location; |
6509 | rule->unused_tuple = unused; |
6510 | rule->vf_id = dst_vport_id; |
6511 | rule->queue_id = q_index; |
6512 | rule->action = action; |
6513 | rule->rule_type = HCLGE_FD_EP_ACTIVE; |
6514 | |
	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);
6518 | |
6519 | return ret; |
6520 | } |
6521 | |
6522 | static int hclge_del_fd_entry(struct hnae3_handle *handle, |
6523 | struct ethtool_rxnfc *cmd) |
6524 | { |
6525 | struct hclge_vport *vport = hclge_get_vport(handle); |
6526 | struct hclge_dev *hdev = vport->back; |
6527 | struct ethtool_rx_flow_spec *fs; |
6528 | int ret; |
6529 | |
6530 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
6531 | return -EOPNOTSUPP; |
6532 | |
6533 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; |
6534 | |
6535 | if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) |
6536 | return -EINVAL; |
6537 | |
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	    !test_bit(fs->location, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		goto out;

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
6556 | return ret; |
6557 | } |
6558 | |
6559 | static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, |
6560 | bool clear_list) |
6561 | { |
6562 | struct hclge_fd_rule *rule; |
6563 | struct hlist_node *node; |
6564 | u16 location; |
6565 | |
	spin_lock_bh(&hdev->fd_rule_lock);

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
6586 | } |
6587 | |
6588 | static void hclge_del_all_fd_entries(struct hclge_dev *hdev) |
6589 | { |
6590 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
6591 | return; |
6592 | |
	hclge_clear_fd_rules_in_list(hdev, true);
6594 | hclge_fd_disable_user_def(hdev); |
6595 | } |
6596 | |
6597 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) |
6598 | { |
6599 | struct hclge_vport *vport = hclge_get_vport(handle); |
6600 | struct hclge_dev *hdev = vport->back; |
6601 | struct hclge_fd_rule *rule; |
6602 | struct hlist_node *node; |
6603 | |
6604 | /* Return ok here, because reset error handling will check this |
6605 | * return value. If error is returned here, the reset process will |
6606 | * fail. |
6607 | */ |
6608 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
6609 | return 0; |
6610 | |
6611 | /* if fd is disabled, should not restore it when reset */ |
6612 | if (!hdev->fd_en) |
6613 | return 0; |
6614 | |
	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state == HCLGE_FD_ACTIVE)
			rule->state = HCLGE_FD_TO_ADD;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6622 | |
6623 | return 0; |
6624 | } |
6625 | |
6626 | static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, |
6627 | struct ethtool_rxnfc *cmd) |
6628 | { |
6629 | struct hclge_vport *vport = hclge_get_vport(handle); |
6630 | struct hclge_dev *hdev = vport->back; |
6631 | |
6632 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) |
6633 | return -EOPNOTSUPP; |
6634 | |
6635 | cmd->rule_cnt = hdev->hclge_fd_rule_num; |
6636 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; |
6637 | |
6638 | return 0; |
6639 | } |
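/* The hclge_fd_get_*_info helpers below do the reverse of the tuple parsers
 * above: they rebuild an ethtool flow spec from a stored rule, reporting a
 * zero mask for any tuple the rule does not use.
 */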
6640 | |
6641 | static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, |
6642 | struct ethtool_tcpip4_spec *spec, |
6643 | struct ethtool_tcpip4_spec *spec_mask) |
6644 | { |
6645 | spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); |
6646 | spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? |
6647 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); |
6648 | |
6649 | spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); |
6650 | spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? |
6651 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); |
6652 | |
6653 | spec->psrc = cpu_to_be16(rule->tuples.src_port); |
6654 | spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? |
6655 | 0 : cpu_to_be16(rule->tuples_mask.src_port); |
6656 | |
6657 | spec->pdst = cpu_to_be16(rule->tuples.dst_port); |
6658 | spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? |
6659 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); |
6660 | |
6661 | spec->tos = rule->tuples.ip_tos; |
6662 | spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? |
6663 | 0 : rule->tuples_mask.ip_tos; |
6664 | } |
6665 | |
6666 | static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, |
6667 | struct ethtool_usrip4_spec *spec, |
6668 | struct ethtool_usrip4_spec *spec_mask) |
6669 | { |
6670 | spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); |
6671 | spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? |
6672 | 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); |
6673 | |
6674 | spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); |
6675 | spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? |
6676 | 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); |
6677 | |
6678 | spec->tos = rule->tuples.ip_tos; |
6679 | spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? |
6680 | 0 : rule->tuples_mask.ip_tos; |
6681 | |
6682 | spec->proto = rule->tuples.ip_proto; |
6683 | spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? |
6684 | 0 : rule->tuples_mask.ip_proto; |
6685 | |
6686 | spec->ip_ver = ETH_RX_NFC_IP4; |
6687 | } |
6688 | |
6689 | static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, |
6690 | struct ethtool_tcpip6_spec *spec, |
6691 | struct ethtool_tcpip6_spec *spec_mask) |
6692 | { |
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);
6708 | |
6709 | spec->tclass = rule->tuples.ip_tos; |
6710 | spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? |
6711 | 0 : rule->tuples_mask.ip_tos; |
6712 | |
6713 | spec->psrc = cpu_to_be16(rule->tuples.src_port); |
6714 | spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? |
6715 | 0 : cpu_to_be16(rule->tuples_mask.src_port); |
6716 | |
6717 | spec->pdst = cpu_to_be16(rule->tuples.dst_port); |
6718 | spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? |
6719 | 0 : cpu_to_be16(rule->tuples_mask.dst_port); |
6720 | } |
6721 | |
6722 | static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, |
6723 | struct ethtool_usrip6_spec *spec, |
6724 | struct ethtool_usrip6_spec *spec_mask) |
6725 | { |
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6739 | |
6740 | spec->tclass = rule->tuples.ip_tos; |
6741 | spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? |
6742 | 0 : rule->tuples_mask.ip_tos; |
6743 | |
6744 | spec->l4_proto = rule->tuples.ip_proto; |
6745 | spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? |
6746 | 0 : rule->tuples_mask.ip_proto; |
6747 | } |
6748 | |
6749 | static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, |
6750 | struct ethhdr *spec, |
6751 | struct ethhdr *spec_mask) |
6752 | { |
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6765 | |
6766 | spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); |
6767 | spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? |
6768 | 0 : cpu_to_be16(rule->tuples_mask.ether_proto); |
6769 | } |
6770 | |
6771 | static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, |
6772 | struct hclge_fd_rule *rule) |
6773 | { |
6774 | if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == |
6775 | HCLGE_FD_TUPLE_USER_DEF_TUPLES) { |
6776 | fs->h_ext.data[0] = 0; |
6777 | fs->h_ext.data[1] = 0; |
6778 | fs->m_ext.data[0] = 0; |
6779 | fs->m_ext.data[1] = 0; |
6780 | } else { |
6781 | fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); |
6782 | fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); |
6783 | fs->m_ext.data[0] = |
6784 | cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); |
6785 | fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); |
6786 | } |
6787 | } |
6788 | |
6789 | static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, |
6790 | struct hclge_fd_rule *rule) |
6791 | { |
6792 | if (fs->flow_type & FLOW_EXT) { |
6793 | fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); |
6794 | fs->m_ext.vlan_tci = |
6795 | rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? |
6796 | 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); |
6797 | |
6798 | hclge_fd_get_user_def_info(fs, rule); |
6799 | } |
6800 | |
6801 | if (fs->flow_type & FLOW_MAC_EXT) { |
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
6808 | } |
6809 | } |
6810 | |
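/* The rule list is kept sorted by location, so the search can stop early
 * once a rule with a higher location is seen.
 */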
6811 | static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, |
6812 | u16 location) |
6813 | { |
6814 | struct hclge_fd_rule *rule = NULL; |
6815 | struct hlist_node *node2; |
6816 | |
6817 | hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { |
6818 | if (rule->location == location) |
6819 | return rule; |
6820 | else if (rule->location > location) |
6821 | return NULL; |
6822 | } |
6823 | |
6824 | return NULL; |
6825 | } |
6826 | |
6827 | static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, |
6828 | struct hclge_fd_rule *rule) |
6829 | { |
6830 | if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { |
6831 | fs->ring_cookie = RX_CLS_FLOW_DISC; |
6832 | } else { |
6833 | u64 vf_id; |
6834 | |
6835 | fs->ring_cookie = rule->queue_id; |
6836 | vf_id = rule->vf_id; |
6837 | vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; |
6838 | fs->ring_cookie |= vf_id; |
6839 | } |
6840 | } |
6841 | |
6842 | static int hclge_get_fd_rule_info(struct hnae3_handle *handle, |
6843 | struct ethtool_rxnfc *cmd) |
6844 | { |
6845 | struct hclge_vport *vport = hclge_get_vport(handle); |
6846 | struct hclge_fd_rule *rule = NULL; |
6847 | struct hclge_dev *hdev = vport->back; |
6848 | struct ethtool_rx_flow_spec *fs; |
6849 | |
6850 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
6851 | return -EOPNOTSUPP; |
6852 | |
6853 | fs = (struct ethtool_rx_flow_spec *)&cmd->fs; |
6854 | |
	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_get_fd_rule(hdev, fs->location);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
6860 | return -ENOENT; |
6861 | } |
6862 | |
6863 | fs->flow_type = rule->flow_type; |
6864 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { |
6865 | case SCTP_V4_FLOW: |
6866 | case TCP_V4_FLOW: |
6867 | case UDP_V4_FLOW: |
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As all other flow types are handled above, the
	 * default case must be ETHER_FLOW.
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
6892 | break; |
6893 | } |
6894 | |
6895 | hclge_fd_get_ext_info(fs, rule); |
6896 | |
6897 | hclge_fd_get_ring_cookie(fs, rule); |
6898 | |
	spin_unlock_bh(&hdev->fd_rule_lock);
6900 | |
6901 | return 0; |
6902 | } |
6903 | |
6904 | static int hclge_get_all_rules(struct hnae3_handle *handle, |
6905 | struct ethtool_rxnfc *cmd, u32 *rule_locs) |
6906 | { |
6907 | struct hclge_vport *vport = hclge_get_vport(handle); |
6908 | struct hclge_dev *hdev = vport->back; |
6909 | struct hclge_fd_rule *rule; |
6910 | struct hlist_node *node2; |
6911 | int cnt = 0; |
6912 | |
6913 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
6914 | return -EOPNOTSUPP; |
6915 | |
6916 | cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; |
6917 | |
	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}
6925 | |
6926 | if (rule->state == HCLGE_FD_TO_DEL) |
6927 | continue; |
6928 | |
6929 | rule_locs[cnt] = rule->location; |
6930 | cnt++; |
6931 | } |
6932 | |
	spin_unlock_bh(&hdev->fd_rule_lock);
6934 | |
6935 | cmd->rule_cnt = cnt; |
6936 | |
6937 | return 0; |
6938 | } |
6939 | |
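/* Extract the aRFS match tuples from the dissected flow keys. For IPv4 the
 * address is stored in the last word of the IPv6-sized array.
 */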
6940 | static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, |
6941 | struct hclge_fd_rule_tuples *tuples) |
6942 | { |
6943 | #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 |
6944 | #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 |
6945 | |
6946 | tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); |
6947 | tuples->ip_proto = fkeys->basic.ip_proto; |
6948 | tuples->dst_port = be16_to_cpu(fkeys->ports.dst); |
6949 | |
6950 | if (fkeys->basic.n_proto == htons(ETH_P_IP)) { |
6951 | tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); |
6952 | tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); |
6953 | } else { |
6954 | int i; |
6955 | |
6956 | for (i = 0; i < IPV6_SIZE; i++) { |
6957 | tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); |
6958 | tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); |
6959 | } |
6960 | } |
6961 | } |
6962 | |
/* traverse all rules, check whether an existing rule has the same tuples */
6964 | static struct hclge_fd_rule * |
6965 | hclge_fd_search_flow_keys(struct hclge_dev *hdev, |
6966 | const struct hclge_fd_rule_tuples *tuples) |
6967 | { |
6968 | struct hclge_fd_rule *rule = NULL; |
6969 | struct hlist_node *node; |
6970 | |
6971 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { |
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6973 | return rule; |
6974 | } |
6975 | |
6976 | return NULL; |
6977 | } |
6978 | |
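/* Build an aRFS rule from the extracted tuples. MAC, VLAN, TOS and source
 * port are not provided by the flow keys, so they are marked unused.
 */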
6979 | static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, |
6980 | struct hclge_fd_rule *rule) |
6981 | { |
6982 | rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | |
6983 | BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | |
6984 | BIT(INNER_SRC_PORT); |
6985 | rule->action = 0; |
6986 | rule->vf_id = 0; |
6987 | rule->rule_type = HCLGE_FD_ARFS_ACTIVE; |
6988 | rule->state = HCLGE_FD_TO_ADD; |
6989 | if (tuples->ether_proto == ETH_P_IP) { |
6990 | if (tuples->ip_proto == IPPROTO_TCP) |
6991 | rule->flow_type = TCP_V4_FLOW; |
6992 | else |
6993 | rule->flow_type = UDP_V4_FLOW; |
6994 | } else { |
6995 | if (tuples->ip_proto == IPPROTO_TCP) |
6996 | rule->flow_type = TCP_V6_FLOW; |
6997 | else |
6998 | rule->flow_type = UDP_V6_FLOW; |
6999 | } |
7000 | memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); |
7001 | memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); |
7002 | } |
7003 | |
7004 | static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, |
7005 | u16 flow_id, struct flow_keys *fkeys) |
7006 | { |
7007 | struct hclge_vport *vport = hclge_get_vport(handle); |
7008 | struct hclge_fd_rule_tuples new_tuples = {}; |
7009 | struct hclge_dev *hdev = vport->back; |
7010 | struct hclge_fd_rule *rule; |
7011 | u16 bit_id; |
7012 | |
7013 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
7014 | return -EOPNOTSUPP; |
7015 | |
	/* aRFS should not work when the user has already added
	 * flow director rules.
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}
7025 | |
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7027 | |
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if one exists with a different queue id, modify the filter;
	 * if one exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
	} else if (rule->queue_id != queue_id) {
		rule->queue_id = queue_id;
		rule->state = HCLGE_FD_TO_ADD;
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
7060 | return rule->location; |
7061 | } |
7062 | |
7063 | static void hclge_rfs_filter_expire(struct hclge_dev *hdev) |
7064 | { |
7065 | #ifdef CONFIG_RFS_ACCEL |
7066 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
7067 | struct hclge_fd_rule *rule; |
7068 | struct hlist_node *node; |
7069 | |
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state != HCLGE_FD_ACTIVE)
			continue;
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			rule->state = HCLGE_FD_TO_DEL;
			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
7085 | #endif |
7086 | } |
7087 | |
/* must be called with fd_rule_lock held */
7089 | static int hclge_clear_arfs_rules(struct hclge_dev *hdev) |
7090 | { |
7091 | #ifdef CONFIG_RFS_ACCEL |
7092 | struct hclge_fd_rule *rule; |
7093 | struct hlist_node *node; |
7094 | int ret; |
7095 | |
7096 | if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) |
7097 | return 0; |
7098 | |
7099 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { |
7100 | switch (rule->state) { |
7101 | case HCLGE_FD_TO_DEL: |
7102 | case HCLGE_FD_ACTIVE: |
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				return ret;
			fallthrough;
		case HCLGE_FD_TO_ADD:
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
7112 | break; |
7113 | default: |
7114 | break; |
7115 | } |
7116 | } |
7117 | hclge_sync_fd_state(hdev); |
7118 | |
7119 | #endif |
7120 | return 0; |
7121 | } |
7122 | |
7123 | static void hclge_get_cls_key_basic(const struct flow_rule *flow, |
7124 | struct hclge_fd_rule *rule) |
7125 | { |
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
7131 | ethtype_key = ntohs(match.key->n_proto); |
7132 | ethtype_mask = ntohs(match.mask->n_proto); |
7133 | |
7134 | if (ethtype_key == ETH_P_ALL) { |
7135 | ethtype_key = 0; |
7136 | ethtype_mask = 0; |
7137 | } |
7138 | rule->tuples.ether_proto = ethtype_key; |
7139 | rule->tuples_mask.ether_proto = ethtype_mask; |
7140 | rule->tuples.ip_proto = match.key->ip_proto; |
7141 | rule->tuples_mask.ip_proto = match.mask->ip_proto; |
7142 | } else { |
7143 | rule->unused_tuple |= BIT(INNER_IP_PROTO); |
7144 | rule->unused_tuple |= BIT(INNER_ETH_TYPE); |
7145 | } |
7146 | } |
7147 | |
7148 | static void hclge_get_cls_key_mac(const struct flow_rule *flow, |
7149 | struct hclge_fd_rule *rule) |
7150 | { |
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7159 | } else { |
7160 | rule->unused_tuple |= BIT(INNER_DST_MAC); |
7161 | rule->unused_tuple |= BIT(INNER_SRC_MAC); |
7162 | } |
7163 | } |
7164 | |
7165 | static void hclge_get_cls_key_vlan(const struct flow_rule *flow, |
7166 | struct hclge_fd_rule *rule) |
7167 | { |
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
7172 | rule->tuples.vlan_tag1 = match.key->vlan_id | |
7173 | (match.key->vlan_priority << VLAN_PRIO_SHIFT); |
7174 | rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | |
7175 | (match.mask->vlan_priority << VLAN_PRIO_SHIFT); |
7176 | } else { |
7177 | rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); |
7178 | } |
7179 | } |
7180 | |
7181 | static void hclge_get_cls_key_ip(const struct flow_rule *flow, |
7182 | struct hclge_fd_rule *rule) |
7183 | { |
7184 | u16 addr_type = 0; |
7185 | |
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
7190 | addr_type = match.key->addr_type; |
7191 | } |
7192 | |
7193 | if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { |
7194 | struct flow_match_ipv4_addrs match; |
7195 | |
		flow_rule_match_ipv4_addrs(flow, &match);
7197 | rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); |
7198 | rule->tuples_mask.src_ip[IPV4_INDEX] = |
7199 | be32_to_cpu(match.mask->src); |
7200 | rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); |
7201 | rule->tuples_mask.dst_ip[IPV4_INDEX] = |
7202 | be32_to_cpu(match.mask->dst); |
7203 | } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { |
7204 | struct flow_match_ipv6_addrs match; |
7205 | |
		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
7215 | } else { |
7216 | rule->unused_tuple |= BIT(INNER_SRC_IP); |
7217 | rule->unused_tuple |= BIT(INNER_DST_IP); |
7218 | } |
7219 | } |
7220 | |
7221 | static void hclge_get_cls_key_port(const struct flow_rule *flow, |
7222 | struct hclge_fd_rule *rule) |
7223 | { |
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);
7228 | |
7229 | rule->tuples.src_port = be16_to_cpu(match.key->src); |
7230 | rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); |
7231 | rule->tuples.dst_port = be16_to_cpu(match.key->dst); |
7232 | rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); |
7233 | } else { |
7234 | rule->unused_tuple |= BIT(INNER_SRC_PORT); |
7235 | rule->unused_tuple |= BIT(INNER_DST_PORT); |
7236 | } |
7237 | } |
7238 | |
7239 | static int hclge_parse_cls_flower(struct hclge_dev *hdev, |
7240 | struct flow_cls_offload *cls_flower, |
7241 | struct hclge_fd_rule *rule) |
7242 | { |
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7244 | struct flow_dissector *dissector = flow->match.dissector; |
7245 | |
7246 | if (dissector->used_keys & |
7247 | ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | |
7248 | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | |
7249 | BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | |
7250 | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | |
7251 | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | |
7252 | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | |
7253 | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { |
7254 | dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n" , |
7255 | dissector->used_keys); |
7256 | return -EOPNOTSUPP; |
7257 | } |
7258 | |
7259 | hclge_get_cls_key_basic(flow, rule); |
7260 | hclge_get_cls_key_mac(flow, rule); |
7261 | hclge_get_cls_key_vlan(flow, rule); |
7262 | hclge_get_cls_key_ip(flow, rule); |
7263 | hclge_get_cls_key_port(flow, rule); |
7264 | |
7265 | return 0; |
7266 | } |
7267 | |
7268 | static int hclge_check_cls_flower(struct hclge_dev *hdev, |
7269 | struct flow_cls_offload *cls_flower, int tc) |
7270 | { |
7271 | u32 prio = cls_flower->common.prio; |
7272 | |
7273 | if (tc < 0 || tc > hdev->tc_max) { |
7274 | dev_err(&hdev->pdev->dev, "invalid traffic class\n" ); |
7275 | return -EINVAL; |
7276 | } |
7277 | |
7278 | if (prio == 0 || |
7279 | prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { |
7280 | dev_err(&hdev->pdev->dev, |
7281 | "prio %u should be in range[1, %u]\n" , |
7282 | prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); |
7283 | return -EINVAL; |
7284 | } |
7285 | |
7286 | if (test_bit(prio - 1, hdev->fd_bmap)) { |
7287 | dev_err(&hdev->pdev->dev, "prio %u is already used\n" , prio); |
7288 | return -EINVAL; |
7289 | } |
7290 | return 0; |
7291 | } |
7292 | |
7293 | static int hclge_add_cls_flower(struct hnae3_handle *handle, |
7294 | struct flow_cls_offload *cls_flower, |
7295 | int tc) |
7296 | { |
7297 | struct hclge_vport *vport = hclge_get_vport(handle); |
7298 | struct hclge_dev *hdev = vport->back; |
7299 | struct hclge_fd_rule *rule; |
7300 | int ret; |
7301 | |
7302 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { |
7303 | dev_err(&hdev->pdev->dev, |
7304 | "cls flower is not supported\n" ); |
7305 | return -EOPNOTSUPP; |
7306 | } |
7307 | |
7308 | ret = hclge_check_cls_flower(hdev, cls_flower, tc); |
7309 | if (ret) { |
7310 | dev_err(&hdev->pdev->dev, |
7311 | "failed to check cls flower params, ret = %d\n" , ret); |
7312 | return ret; |
7313 | } |
7314 | |
	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}
7324 | |
7325 | rule->action = HCLGE_FD_ACTION_SELECT_TC; |
7326 | rule->cls_flower.tc = tc; |
7327 | rule->location = cls_flower->common.prio - 1; |
7328 | rule->vf_id = 0; |
7329 | rule->cls_flower.cookie = cls_flower->cookie; |
7330 | rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; |
7331 | |
	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);
7335 | |
7336 | return ret; |
7337 | } |
7338 | |
7339 | static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, |
7340 | unsigned long cookie) |
7341 | { |
7342 | struct hclge_fd_rule *rule; |
7343 | struct hlist_node *node; |
7344 | |
7345 | hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { |
7346 | if (rule->cls_flower.cookie == cookie) |
7347 | return rule; |
7348 | } |
7349 | |
7350 | return NULL; |
7351 | } |
7352 | |
7353 | static int hclge_del_cls_flower(struct hnae3_handle *handle, |
7354 | struct flow_cls_offload *cls_flower) |
7355 | { |
7356 | struct hclge_vport *vport = hclge_get_vport(handle); |
7357 | struct hclge_dev *hdev = vport->back; |
7358 | struct hclge_fd_rule *rule; |
7359 | int ret; |
7360 | |
7361 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
7362 | return -EOPNOTSUPP; |
7363 | |
	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		/* if tcam config fails, set the rule state to TO_DEL, so the
		 * rule will be deleted when the periodic task is scheduled.
		 */
		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
	spin_unlock_bh(&hdev->fd_rule_lock);
7387 | |
7388 | return 0; |
7389 | } |
7390 | |
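/* Flush pending rule state to hardware: program rules marked TO_ADD and
 * remove rules marked TO_DEL. On failure, the changed flag is set again so
 * the periodic service task retries later.
 */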
7391 | static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) |
7392 | { |
7393 | struct hclge_fd_rule *rule; |
7394 | struct hlist_node *node; |
7395 | int ret = 0; |
7396 | |
	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7398 | return; |
7399 | |
	spin_lock_bh(&hdev->fd_rule_lock);
7401 | |
7402 | hlist_for_each_entry_safe(rule, node, hlist, rule_node) { |
7403 | switch (rule->state) { |
7404 | case HCLGE_FD_TO_ADD: |
7405 | ret = hclge_fd_config_rule(hdev, rule); |
7406 | if (ret) |
7407 | goto out; |
7408 | rule->state = HCLGE_FD_ACTIVE; |
7409 | break; |
7410 | case HCLGE_FD_TO_DEL: |
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				goto out;
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hclge_fd_free_node(hdev, rule);
7417 | break; |
7418 | default: |
7419 | break; |
7420 | } |
7421 | } |
7422 | |
7423 | out: |
	if (ret)
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	spin_unlock_bh(&hdev->fd_rule_lock);
7428 | } |
7429 | |
7430 | static void hclge_sync_fd_table(struct hclge_dev *hdev) |
7431 | { |
7432 | if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) |
7433 | return; |
7434 | |
	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

		hclge_clear_fd_rules_in_list(hdev, clear_list);
	}

	hclge_sync_fd_user_def_cfg(hdev, false);

	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7444 | } |
7445 | |
7446 | static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) |
7447 | { |
7448 | struct hclge_vport *vport = hclge_get_vport(handle); |
7449 | struct hclge_dev *hdev = vport->back; |
7450 | |
7451 | return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || |
7452 | hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); |
7453 | } |
7454 | |
7455 | static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) |
7456 | { |
7457 | struct hclge_vport *vport = hclge_get_vport(handle); |
7458 | struct hclge_dev *hdev = vport->back; |
7459 | |
7460 | return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); |
7461 | } |
7462 | |
7463 | static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) |
7464 | { |
7465 | struct hclge_vport *vport = hclge_get_vport(handle); |
7466 | struct hclge_dev *hdev = vport->back; |
7467 | |
7468 | return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); |
7469 | } |
7470 | |
7471 | static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) |
7472 | { |
7473 | struct hclge_vport *vport = hclge_get_vport(handle); |
7474 | struct hclge_dev *hdev = vport->back; |
7475 | |
7476 | return hdev->rst_stats.hw_reset_done_cnt; |
7477 | } |
7478 | |
7479 | static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) |
7480 | { |
7481 | struct hclge_vport *vport = hclge_get_vport(handle); |
7482 | struct hclge_dev *hdev = vport->back; |
7483 | |
7484 | hdev->fd_en = enable; |
7485 | |
	if (!enable)
		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
	else
		hclge_restore_fd_entries(handle);

	hclge_task_schedule(hdev, 0);
7492 | } |
7493 | |
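/* Enable or disable MAC TX/RX along with padding, FCS and truncation
 * handling. When disabling, wait for the MAC link to report down before
 * returning.
 */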
7494 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
7495 | { |
7496 | #define HCLGE_LINK_STATUS_WAIT_CNT 3 |
7497 | |
7498 | struct hclge_desc desc; |
7499 | struct hclge_config_mac_mode_cmd *req = |
7500 | (struct hclge_config_mac_mode_cmd *)desc.data; |
7501 | u32 loop_en = 0; |
7502 | int ret; |
7503 | |
7504 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); |
7505 | |
7506 | if (enable) { |
7507 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); |
7508 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); |
7509 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); |
7510 | hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); |
7511 | hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); |
7512 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); |
7513 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); |
7514 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); |
7515 | hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); |
7516 | hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); |
7517 | } |
7518 | |
7519 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
7520 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret = %d.\n", ret);
7525 | return; |
7526 | } |
7527 | |
7528 | if (!enable) |
7529 | hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, |
7530 | HCLGE_LINK_STATUS_WAIT_CNT); |
7531 | } |
7532 | |
7533 | static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, |
7534 | u8 switch_param, u8 param_mask) |
7535 | { |
7536 | struct hclge_mac_vlan_switch_cmd *req; |
7537 | struct hclge_desc desc; |
7538 | u32 func_id; |
7539 | int ret; |
7540 | |
	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7542 | req = (struct hclge_mac_vlan_switch_cmd *)desc.data; |
7543 | |
7544 | /* read current config parameter */ |
7545 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, |
7546 | true); |
7547 | req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; |
7548 | req->func_id = cpu_to_le32(func_id); |
7549 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
7554 | return ret; |
7555 | } |
7556 | |
7557 | /* modify and write new config parameter */ |
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
7566 | return ret; |
7567 | } |
7568 | |
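/* Poll the PHY until it reports the expected link state, checking up to
 * 200 times at 10 ms intervals (about 2 seconds).
 */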
7569 | static void hclge_phy_link_status_wait(struct hclge_dev *hdev, |
7570 | int link_ret) |
7571 | { |
7572 | #define HCLGE_PHY_LINK_STATUS_NUM 200 |
7573 | |
7574 | struct phy_device *phydev = hdev->hw.mac.phydev; |
7575 | int i = 0; |
7576 | int ret; |
7577 | |
7578 | do { |
7579 | ret = phy_read_status(phydev); |
7580 | if (ret) { |
7581 | dev_err(&hdev->pdev->dev, |
7582 | "phy update link status fail, ret = %d\n" , ret); |
7583 | return; |
7584 | } |
7585 | |
7586 | if (phydev->link == link_ret) |
7587 | break; |
7588 | |
7589 | msleep(HCLGE_LINK_STATUS_MS); |
7590 | } while (++i < HCLGE_PHY_LINK_STATUS_NUM); |
7591 | } |
7592 | |
7593 | static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, |
7594 | int wait_cnt) |
7595 | { |
7596 | int link_status; |
7597 | int i = 0; |
7598 | int ret; |
7599 | |
7600 | do { |
		ret = hclge_get_mac_link_status(hdev, &link_status);
7602 | if (ret) |
7603 | return ret; |
7604 | if (link_status == link_ret) |
7605 | return 0; |
7606 | |
7607 | msleep(HCLGE_LINK_STATUS_MS); |
7608 | } while (++i < wait_cnt); |
7609 | return -EBUSY; |
7610 | } |
7611 | |
7612 | static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, |
7613 | bool is_phy) |
7614 | { |
7615 | #define HCLGE_MAC_LINK_STATUS_NUM 100 |
7616 | |
7617 | int link_ret; |
7618 | |
7619 | link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; |
7620 | |
7621 | if (is_phy) |
7622 | hclge_phy_link_status_wait(hdev, link_ret); |
7623 | |
7624 | return hclge_mac_link_status_wait(hdev, link_ret, |
7625 | HCLGE_MAC_LINK_STATUS_NUM); |
7626 | } |
7627 | |
7628 | static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) |
7629 | { |
7630 | struct hclge_config_mac_mode_cmd *req; |
7631 | struct hclge_desc desc; |
7632 | u32 loop_en; |
7633 | int ret; |
7634 | |
7635 | req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; |
7636 | /* 1 Read out the MAC mode config at first */ |
7637 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret = %d.\n", ret);
		return ret;
	}
7644 | |
7645 | /* 2 Then setup the loopback flag */ |
7646 | loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); |
7647 | hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); |
7648 | |
7649 | req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); |
7650 | |
7651 | /* 3 Config mac work mode with loopback flag |
7652 | * and its original configure parameters |
7653 | */ |
	hclge_comm_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret = %d.\n", ret);
	return ret;
7660 | } |
7661 | |
7662 | static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, |
7663 | enum hnae3_loop loop_mode) |
7664 | { |
7665 | struct hclge_common_lb_cmd *req; |
7666 | struct hclge_desc desc; |
7667 | u8 loop_mode_b; |
7668 | int ret; |
7669 | |
7670 | req = (struct hclge_common_lb_cmd *)desc.data; |
7671 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); |
7672 | |
7673 | switch (loop_mode) { |
7674 | case HNAE3_LOOP_SERIAL_SERDES: |
7675 | loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; |
7676 | break; |
7677 | case HNAE3_LOOP_PARALLEL_SERDES: |
7678 | loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; |
7679 | break; |
7680 | case HNAE3_LOOP_PHY: |
7681 | loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; |
7682 | break; |
7683 | default: |
7684 | dev_err(&hdev->pdev->dev, |
7685 | "unsupported loopback mode %d\n" , loop_mode); |
7686 | return -ENOTSUPP; |
7687 | } |
7688 | |
7689 | req->mask = loop_mode_b; |
7690 | if (en) |
7691 | req->enable = loop_mode_b; |
7692 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to send loopback cmd, loop_mode = %d, ret = %d\n",
			loop_mode, ret);
7698 | |
7699 | return ret; |
7700 | } |
7701 | |
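/* Poll for loopback configuration completion, retrying up to 100 times at
 * 10 ms intervals, then check the result flag once the done bit is set.
 */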
7702 | static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) |
7703 | { |
7704 | #define HCLGE_COMMON_LB_RETRY_MS 10 |
7705 | #define HCLGE_COMMON_LB_RETRY_NUM 100 |
7706 | |
7707 | struct hclge_common_lb_cmd *req; |
7708 | struct hclge_desc desc; |
7709 | u32 i = 0; |
7710 | int ret; |
7711 | |
7712 | req = (struct hclge_common_lb_cmd *)desc.data; |
7713 | |
7714 | do { |
7715 | msleep(HCLGE_COMMON_LB_RETRY_MS); |
7716 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, |
7717 | true); |
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get loopback done status, ret = %d\n",
				ret);
			return ret;
		}
7725 | } while (++i < HCLGE_COMMON_LB_RETRY_NUM && |
7726 | !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); |
7727 | |
	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
		return -EIO;
	}
7735 | |
7736 | return 0; |
7737 | } |
7738 | |
7739 | static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, |
7740 | enum hnae3_loop loop_mode) |
7741 | { |
7742 | int ret; |
7743 | |
7744 | ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); |
7745 | if (ret) |
7746 | return ret; |
7747 | |
7748 | return hclge_cfg_common_loopback_wait(hdev); |
7749 | } |
7750 | |
7751 | static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, |
7752 | enum hnae3_loop loop_mode) |
7753 | { |
7754 | int ret; |
7755 | |
7756 | ret = hclge_cfg_common_loopback(hdev, en, loop_mode); |
7757 | if (ret) |
7758 | return ret; |
7759 | |
	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");
7766 | |
7767 | return ret; |
7768 | } |
7769 | |
7770 | static int hclge_enable_phy_loopback(struct hclge_dev *hdev, |
7771 | struct phy_device *phydev) |
7772 | { |
7773 | int ret; |
7774 | |
7775 | if (!phydev->suspended) { |
7776 | ret = phy_suspend(phydev); |
7777 | if (ret) |
7778 | return ret; |
7779 | } |
7780 | |
7781 | ret = phy_resume(phydev); |
7782 | if (ret) |
7783 | return ret; |
7784 | |
	return phy_loopback(phydev, true);
7786 | } |
7787 | |
7788 | static int hclge_disable_phy_loopback(struct hclge_dev *hdev, |
7789 | struct phy_device *phydev) |
7790 | { |
7791 | int ret; |
7792 | |
	ret = phy_loopback(phydev, false);
7794 | if (ret) |
7795 | return ret; |
7796 | |
7797 | return phy_suspend(phydev); |
7798 | } |
7799 | |
7800 | static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) |
7801 | { |
7802 | struct phy_device *phydev = hdev->hw.mac.phydev; |
7803 | int ret; |
7804 | |
7805 | if (!phydev) { |
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
7810 | } |
7811 | |
7812 | if (en) |
7813 | ret = hclge_enable_phy_loopback(hdev, phydev); |
7814 | else |
7815 | ret = hclge_disable_phy_loopback(hdev, phydev); |
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");
7828 | |
7829 | return ret; |
7830 | } |
7831 | |
7832 | static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, |
7833 | u16 stream_id, bool enable) |
7834 | { |
7835 | struct hclge_desc desc; |
7836 | struct hclge_cfg_com_tqp_queue_cmd *req = |
7837 | (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; |
7838 | |
7839 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); |
7840 | req->tqp_id = cpu_to_le16(tqp_id); |
7841 | req->stream_id = cpu_to_le16(stream_id); |
7842 | if (enable) |
7843 | req->enable |= 1U << HCLGE_TQP_ENABLE_B; |
7844 | |
	return hclge_cmd_send(&hdev->hw, &desc, 1);
7846 | } |
7847 | |
7848 | static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) |
7849 | { |
7850 | struct hclge_vport *vport = hclge_get_vport(handle); |
7851 | struct hclge_dev *hdev = vport->back; |
7852 | int ret; |
7853 | u16 i; |
7854 | |
7855 | for (i = 0; i < handle->kinfo.num_tqps; i++) { |
		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7857 | if (ret) |
7858 | return ret; |
7859 | } |
7860 | return 0; |
7861 | } |
7862 | |
7863 | static int hclge_set_loopback(struct hnae3_handle *handle, |
7864 | enum hnae3_loop loop_mode, bool en) |
7865 | { |
7866 | struct hclge_vport *vport = hclge_get_vport(handle); |
7867 | struct hclge_dev *hdev = vport->back; |
7868 | int ret = 0; |
7869 | |
7870 | /* Loopback can be enabled in three places: SSU, MAC, and serdes. By |
7871 | * default, SSU loopback is enabled, so if the SMAC and the DMAC are |
7872 | * the same, the packets are looped back in the SSU. If SSU loopback |
7873 | * is disabled, packets can reach MAC even if SMAC is the same as DMAC. |
7874 | */ |
7875 | if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
7876 | u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); |
7877 | |
7878 | ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, |
7879 | HCLGE_SWITCH_ALW_LPBK_MASK); |
7880 | if (ret) |
7881 | return ret; |
7882 | } |
7883 | |
7884 | switch (loop_mode) { |
7885 | case HNAE3_LOOP_APP: |
7886 | ret = hclge_set_app_loopback(hdev, en); |
7887 | break; |
7888 | case HNAE3_LOOP_SERIAL_SERDES: |
7889 | case HNAE3_LOOP_PARALLEL_SERDES: |
7890 | ret = hclge_set_common_loopback(hdev, en, loop_mode); |
7891 | break; |
7892 | case HNAE3_LOOP_PHY: |
7893 | ret = hclge_set_phy_loopback(hdev, en); |
7894 | break; |
7895 | case HNAE3_LOOP_EXTERNAL: |
7896 | break; |
7897 | default: |
7898 | ret = -ENOTSUPP; |
7899 | dev_err(&hdev->pdev->dev, |
7900 | "loop_mode %d is not supported\n" , loop_mode); |
7901 | break; |
7902 | } |
7903 | |
7904 | if (ret) |
7905 | return ret; |
7906 | |
	ret = hclge_tqp_enable(handle, en);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
			en ? "enable" : "disable", ret);
7911 | |
7912 | return ret; |
7913 | } |
7914 | |
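/* Restore the default (disabled) state for app loopback and both serdes
 * loopback modes.
 */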
7915 | static int hclge_set_default_loopback(struct hclge_dev *hdev) |
7916 | { |
7917 | int ret; |
7918 | |
	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
7929 | } |
7930 | |
7931 | static void hclge_flush_link_update(struct hclge_dev *hdev) |
7932 | { |
7933 | #define HCLGE_FLUSH_LINK_TIMEOUT 100000 |
7934 | |
7935 | unsigned long last = hdev->serv_processed_cnt; |
7936 | int i = 0; |
7937 | |
7938 | while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && |
7939 | i++ < HCLGE_FLUSH_LINK_TIMEOUT && |
7940 | last == hdev->serv_processed_cnt) |
		usleep_range(1, 1);
7942 | } |
7943 | |
7944 | static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) |
7945 | { |
7946 | struct hclge_vport *vport = hclge_get_vport(handle); |
7947 | struct hclge_dev *hdev = vport->back; |
7948 | |
7949 | if (enable) { |
		hclge_task_schedule(hdev, 0);
7951 | } else { |
7952 | /* Set the DOWN flag here to disable link updating */ |
		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7954 | |
7955 | /* flush memory to make sure DOWN is seen by service task */ |
7956 | smp_mb__before_atomic(); |
7957 | hclge_flush_link_update(hdev); |
7958 | } |
7959 | } |
7960 | |
7961 | static int hclge_ae_start(struct hnae3_handle *handle) |
7962 | { |
7963 | struct hclge_vport *vport = hclge_get_vport(handle); |
7964 | struct hclge_dev *hdev = vport->back; |
7965 | |
7966 | /* mac enable */ |
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7969 | hdev->hw.mac.link = 0; |
7970 | |
7971 | /* reset tqp stats */ |
7972 | hclge_comm_reset_tqp_stats(handle); |
7973 | |
7974 | hclge_mac_start_phy(hdev); |
7975 | |
7976 | return 0; |
7977 | } |
7978 | |
7979 | static void hclge_ae_stop(struct hnae3_handle *handle) |
7980 | { |
7981 | struct hclge_vport *vport = hclge_get_vport(handle); |
7982 | struct hclge_dev *hdev = vport->back; |
7983 | |
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(hdev);
	spin_unlock_bh(&hdev->fd_rule_lock);
7988 | |
	/* If it is not PF reset or FLR, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
7992 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { |
7993 | hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, |
7994 | HCLGE_PFC_DISABLE); |
7995 | if (hdev->reset_type != HNAE3_FUNC_RESET && |
7996 | hdev->reset_type != HNAE3_FLR_RESET) { |
7997 | hclge_mac_stop_phy(hdev); |
7998 | hclge_update_link_status(hdev); |
7999 | return; |
8000 | } |
8001 | } |
8002 | |
8003 | hclge_reset_tqp(handle); |
8004 | |
	hclge_config_mac_tnl_int(hdev, false);
8006 | |
8007 | /* Mac disable */ |
	hclge_cfg_mac_mode(hdev, false);
8009 | |
8010 | hclge_mac_stop_phy(hdev); |
8011 | |
8012 | /* reset tqp stats */ |
8013 | hclge_comm_reset_tqp_stats(handle); |
8014 | hclge_update_link_status(hdev); |
8015 | } |
8016 | |
8017 | int hclge_vport_start(struct hclge_vport *vport) |
8018 | { |
8019 | struct hclge_dev *hdev = vport->back; |
8020 | |
	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8024 | vport->last_active_jiffies = jiffies; |
8025 | vport->need_notify = 0; |
8026 | |
8027 | if (test_bit(vport->vport_id, hdev->vport_config_block)) { |
8028 | if (vport->vport_id) { |
8029 | hclge_restore_mac_table_common(vport); |
8030 | hclge_restore_vport_vlan_table(vport); |
8031 | } else { |
8032 | hclge_restore_hw_table(hdev); |
8033 | } |
8034 | } |
8035 | |
	clear_bit(vport->vport_id, hdev->vport_config_block);
8037 | |
8038 | return 0; |
8039 | } |
8040 | |
8041 | void hclge_vport_stop(struct hclge_vport *vport) |
8042 | { |
	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8045 | vport->need_notify = 0; |
8046 | } |
8047 | |
8048 | static int hclge_client_start(struct hnae3_handle *handle) |
8049 | { |
8050 | struct hclge_vport *vport = hclge_get_vport(handle); |
8051 | |
8052 | return hclge_vport_start(vport); |
8053 | } |
8054 | |
8055 | static void hclge_client_stop(struct hnae3_handle *handle) |
8056 | { |
8057 | struct hclge_vport *vport = hclge_get_vport(handle); |
8058 | |
8059 | hclge_vport_stop(vport); |
8060 | } |
8061 | |
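/* Translate the hardware response of a MAC VLAN table command into an
 * errno. The meaning of resp_code depends on the opcode: for add, 0 and 1
 * both mean success while the overflow codes map to -ENOSPC; for remove
 * and lookup, 1 means the entry was not found.
 */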
8062 | static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, |
8063 | u16 cmdq_resp, u8 resp_code, |
8064 | enum hclge_mac_vlan_tbl_opcode op) |
8065 | { |
8066 | struct hclge_dev *hdev = vport->back; |
8067 | |
8068 | if (cmdq_resp) { |
8069 | dev_err(&hdev->pdev->dev, |
8070 | "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n" , |
8071 | cmdq_resp); |
8072 | return -EIO; |
8073 | } |
8074 | |
8075 | if (op == HCLGE_MAC_VLAN_ADD) { |
8076 | if (!resp_code || resp_code == 1) |
8077 | return 0; |
8078 | else if (resp_code == HCLGE_ADD_UC_OVERFLOW || |
8079 | resp_code == HCLGE_ADD_MC_OVERFLOW) |
8080 | return -ENOSPC; |
8081 | |
8082 | dev_err(&hdev->pdev->dev, |
8083 | "add mac addr failed for undefined, code=%u.\n" , |
8084 | resp_code); |
8085 | return -EIO; |
8086 | } else if (op == HCLGE_MAC_VLAN_REMOVE) { |
8087 | if (!resp_code) { |
8088 | return 0; |
8089 | } else if (resp_code == 1) { |
8090 | dev_dbg(&hdev->pdev->dev, |
8091 | "remove mac addr failed for miss.\n" ); |
8092 | return -ENOENT; |
8093 | } |
8094 | |
8095 | dev_err(&hdev->pdev->dev, |
8096 | "remove mac addr failed for undefined, code=%u.\n" , |
8097 | resp_code); |
8098 | return -EIO; |
8099 | } else if (op == HCLGE_MAC_VLAN_LKUP) { |
8100 | if (!resp_code) { |
8101 | return 0; |
8102 | } else if (resp_code == 1) { |
8103 | dev_dbg(&hdev->pdev->dev, |
8104 | "lookup mac addr failed for miss.\n" ); |
8105 | return -ENOENT; |
8106 | } |
8107 | |
8108 | dev_err(&hdev->pdev->dev, |
8109 | "lookup mac addr failed for undefined, code=%u.\n" , |
8110 | resp_code); |
8111 | return -EIO; |
8112 | } |
8113 | |
8114 | dev_err(&hdev->pdev->dev, |
8115 | "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n" , op); |
8116 | |
8117 | return -EINVAL; |
8118 | } |
8119 | |
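/* Set or clear one function's bit in a MAC VLAN entry's VF bitmap. The
 * bitmap spans two descriptors: the first 192 function ids live in
 * desc[1], the remaining ones in desc[2].
 */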
8120 | static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) |
8121 | { |
8122 | #define HCLGE_VF_NUM_IN_FIRST_DESC 192 |
8123 | |
8124 | unsigned int word_num; |
8125 | unsigned int bit_num; |
8126 | |
8127 | if (vfid > 255 || vfid < 0) |
8128 | return -EIO; |
8129 | |
8130 | if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { |
8131 | word_num = vfid / 32; |
8132 | bit_num = vfid % 32; |
8133 | if (clr) |
8134 | desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
8135 | else |
8136 | desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); |
8137 | } else { |
8138 | word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; |
8139 | bit_num = vfid % 32; |
8140 | if (clr) |
8141 | desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); |
8142 | else |
8143 | desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); |
8144 | } |
8145 | |
8146 | return 0; |
8147 | } |
8148 | |
8149 | static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) |
8150 | { |
8151 | #define HCLGE_DESC_NUMBER 3 |
8152 | #define HCLGE_FUNC_NUMBER_PER_DESC 6 |
8153 | int i, j; |
8154 | |
8155 | for (i = 1; i < HCLGE_DESC_NUMBER; i++) |
8156 | for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) |
8157 | if (desc[i].data[j]) |
8158 | return false; |
8159 | |
8160 | return true; |
8161 | } |
8162 | |
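/* Pack the 6-byte MAC address into the table entry layout: bytes 0-3 go
 * into mac_addr_hi32 (byte 0 in the least significant bits) and bytes 4-5
 * into mac_addr_lo16.
 */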
8163 | static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, |
8164 | const u8 *addr, bool is_mc) |
8165 | { |
8166 | const unsigned char *mac_addr = addr; |
8167 | u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | |
8168 | (mac_addr[0]) | (mac_addr[1] << 8); |
8169 | u32 low_val = mac_addr[4] | (mac_addr[5] << 8); |
8170 | |
8171 | hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
8172 | if (is_mc) { |
8173 | hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); |
8174 | hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); |
8175 | } |
8176 | |
8177 | new_req->mac_addr_hi32 = cpu_to_le32(high_val); |
8178 | new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); |
8179 | } |
8180 | |
8181 | static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, |
8182 | struct hclge_mac_vlan_tbl_entry_cmd *req) |
8183 | { |
8184 | struct hclge_dev *hdev = vport->back; |
8185 | struct hclge_desc desc; |
8186 | u8 resp_code; |
8187 | u16 retval; |
8188 | int ret; |
8189 | |
8190 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); |
8191 | |
8192 | memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
8193 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
8206 | } |
8207 | |
8208 | static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, |
8209 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
8210 | struct hclge_desc *desc, |
8211 | bool is_mc) |
8212 | { |
8213 | struct hclge_dev *hdev = vport->back; |
8214 | u8 resp_code; |
8215 | u16 retval; |
8216 | int ret; |
8217 | |
8218 | hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); |
8219 | if (is_mc) { |
8220 | desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
8221 | memcpy(desc[0].data, |
8222 | req, |
8223 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
8224 | hclge_cmd_setup_basic_desc(&desc[1], |
8225 | HCLGE_OPC_MAC_VLAN_ADD, |
8226 | true); |
8227 | desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
8228 | hclge_cmd_setup_basic_desc(&desc[2], |
8229 | HCLGE_OPC_MAC_VLAN_ADD, |
8230 | true); |
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8232 | } else { |
8233 | memcpy(desc[0].data, |
8234 | req, |
8235 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8237 | } |
8238 | if (ret) { |
8239 | dev_err(&hdev->pdev->dev, |
8240 | "lookup mac addr failed for cmd_send, ret =%d.\n" , |
8241 | ret); |
8242 | return ret; |
8243 | } |
8244 | resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; |
8245 | retval = le16_to_cpu(desc[0].retval); |
8246 | |
	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
8249 | } |
8250 | |
8251 | static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, |
8252 | struct hclge_mac_vlan_tbl_entry_cmd *req, |
8253 | struct hclge_desc *mc_desc) |
8254 | { |
8255 | struct hclge_dev *hdev = vport->back; |
8256 | int cfg_status; |
8257 | u8 resp_code; |
8258 | u16 retval; |
8259 | int ret; |
8260 | |
8261 | if (!mc_desc) { |
8262 | struct hclge_desc desc; |
8263 | |
8264 | hclge_cmd_setup_basic_desc(&desc, |
8265 | HCLGE_OPC_MAC_VLAN_ADD, |
8266 | false); |
8267 | memcpy(desc.data, req, |
8268 | sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); |
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
8292 | } |
8293 | |
8294 | if (ret) { |
8295 | dev_err(&hdev->pdev->dev, |
8296 | "add mac addr failed for cmd_send, ret =%d.\n" , |
8297 | ret); |
8298 | return ret; |
8299 | } |
8300 | |
8301 | return cfg_status; |
8302 | } |
8303 | |
8304 | static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, |
8305 | u16 *allocated_size) |
8306 | { |
8307 | struct hclge_umv_spc_alc_cmd *req; |
8308 | struct hclge_desc desc; |
8309 | int ret; |
8310 | |
8311 | req = (struct hclge_umv_spc_alc_cmd *)desc.data; |
8312 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); |
8313 | |
8314 | req->space_size = cpu_to_le32(space_size); |
8315 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8319 | ret); |
8320 | return ret; |
8321 | } |
8322 | |
8323 | *allocated_size = le32_to_cpu(desc.data[1]); |
8324 | |
8325 | return 0; |
8326 | } |
8327 | |
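/* Request the wanted unicast MAC VLAN (UMV) space from firmware, then
 * split the granted entries into equal private quotas, one per vport, with
 * the shared pool taking one quota plus the division remainder.
 */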
8328 | static int hclge_init_umv_space(struct hclge_dev *hdev) |
8329 | { |
8330 | u16 allocated_size = 0; |
8331 | int ret; |
8332 | |
	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size,
				  &allocated_size);
8334 | if (ret) |
8335 | return ret; |
8336 | |
8337 | if (allocated_size < hdev->wanted_umv_size) |
8338 | dev_warn(&hdev->pdev->dev, |
8339 | "failed to alloc umv space, want %u, get %u\n" , |
8340 | hdev->wanted_umv_size, allocated_size); |
8341 | |
8342 | hdev->max_umv_size = allocated_size; |
8343 | hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); |
8344 | hdev->share_umv_size = hdev->priv_umv_size + |
8345 | hdev->max_umv_size % (hdev->num_alloc_vport + 1); |
8346 | |
8347 | if (hdev->ae_dev->dev_specs.mc_mac_size) |
		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8349 | |
8350 | return 0; |
8351 | } |
8352 | |
8353 | static void hclge_reset_umv_space(struct hclge_dev *hdev) |
8354 | { |
8355 | struct hclge_vport *vport; |
8356 | int i; |
8357 | |
8358 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
8359 | vport = &hdev->vport[i]; |
8360 | vport->used_umv_num = 0; |
8361 | } |
8362 | |
8363 | mutex_lock(&hdev->vport_lock); |
8364 | hdev->share_umv_size = hdev->priv_umv_size + |
8365 | hdev->max_umv_size % (hdev->num_alloc_vport + 1); |
	mutex_unlock(&hdev->vport_lock);
8367 | |
8368 | hdev->used_mc_mac_num = 0; |
8369 | } |
8370 | |
8371 | static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) |
8372 | { |
8373 | struct hclge_dev *hdev = vport->back; |
8374 | bool is_full; |
8375 | |
8376 | if (need_lock) |
8377 | mutex_lock(&hdev->vport_lock); |
8378 | |
8379 | is_full = (vport->used_umv_num >= hdev->priv_umv_size && |
8380 | hdev->share_umv_size == 0); |
8381 | |
8382 | if (need_lock) |
		mutex_unlock(&hdev->vport_lock);
8384 | |
8385 | return is_full; |
8386 | } |
8387 | |
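/* Account a unicast entry being added or freed. A vport consumes its
 * private UMV quota first and only then the shared pool; frees are
 * credited back in the reverse order.
 */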
8388 | static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) |
8389 | { |
8390 | struct hclge_dev *hdev = vport->back; |
8391 | |
8392 | if (is_free) { |
8393 | if (vport->used_umv_num > hdev->priv_umv_size) |
8394 | hdev->share_umv_size++; |
8395 | |
8396 | if (vport->used_umv_num > 0) |
8397 | vport->used_umv_num--; |
8398 | } else { |
8399 | if (vport->used_umv_num >= hdev->priv_umv_size && |
8400 | hdev->share_umv_size > 0) |
8401 | hdev->share_umv_size--; |
8402 | vport->used_umv_num++; |
8403 | } |
8404 | } |
8405 | |
8406 | static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, |
8407 | const u8 *mac_addr) |
8408 | { |
8409 | struct hclge_mac_node *mac_node, *tmp; |
8410 | |
8411 | list_for_each_entry_safe(mac_node, tmp, list, node) |
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8413 | return mac_node; |
8414 | |
8415 | return NULL; |
8416 | } |
8417 | |
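/* Merge a new request into an existing mac node: TO_ADD over TO_DEL and
 * ACTIVE over TO_ADD both settle to ACTIVE, while TO_DEL either cancels a
 * pending TO_ADD (freeing the node) or marks the node for deletion.
 */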
8418 | static void hclge_update_mac_node(struct hclge_mac_node *mac_node, |
8419 | enum HCLGE_MAC_NODE_STATE state) |
8420 | { |
8421 | switch (state) { |
8422 | /* from set_rx_mode or tmp_add_list */ |
8423 | case HCLGE_MAC_TO_ADD: |
8424 | if (mac_node->state == HCLGE_MAC_TO_DEL) |
8425 | mac_node->state = HCLGE_MAC_ACTIVE; |
8426 | break; |
8427 | /* only from set_rx_mode */ |
8428 | case HCLGE_MAC_TO_DEL: |
8429 | if (mac_node->state == HCLGE_MAC_TO_ADD) { |
			list_del(&mac_node->node);
			kfree(mac_node);
8432 | } else { |
8433 | mac_node->state = HCLGE_MAC_TO_DEL; |
8434 | } |
8435 | break; |
8436 | /* only from tmp_add_list, the mac_node->state won't be |
8437 | * ACTIVE. |
8438 | */ |
8439 | case HCLGE_MAC_ACTIVE: |
8440 | if (mac_node->state == HCLGE_MAC_TO_ADD) |
8441 | mac_node->state = HCLGE_MAC_ACTIVE; |
8442 | |
8443 | break; |
8444 | } |
8445 | } |
8446 | |
8447 | int hclge_update_mac_list(struct hclge_vport *vport, |
8448 | enum HCLGE_MAC_NODE_STATE state, |
8449 | enum HCLGE_MAC_ADDR_TYPE mac_type, |
8450 | const unsigned char *addr) |
8451 | { |
8452 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8453 | struct hclge_dev *hdev = vport->back; |
8454 | struct hclge_mac_node *mac_node; |
8455 | struct list_head *list; |
8456 | |
8457 | list = (mac_type == HCLGE_MAC_ADDR_UC) ? |
8458 | &vport->uc_mac_list : &vport->mc_mac_list; |
8459 | |
	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %s from mac list\n",
			format_mac_addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);
8497 | |
8498 | return 0; |
8499 | } |
8500 | |
8501 | static int hclge_add_uc_addr(struct hnae3_handle *handle, |
8502 | const unsigned char *addr) |
8503 | { |
8504 | struct hclge_vport *vport = hclge_get_vport(handle); |
8505 | |
	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
8508 | } |
8509 | |
8510 | int hclge_add_uc_addr_common(struct hclge_vport *vport, |
8511 | const unsigned char *addr) |
8512 | { |
8513 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8514 | struct hclge_dev *hdev = vport->back; |
8515 | struct hclge_mac_vlan_tbl_entry_cmd req; |
8516 | struct hclge_desc desc; |
8517 | u16 egress_port = 0; |
8518 | int ret; |
8519 | |
8520 | /* mac addr check */ |
8521 | if (is_zero_ether_addr(addr) || |
8522 | is_broadcast_ether_addr(addr) || |
8523 | is_multicast_ether_addr(addr)) { |
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8527 | format_mac_addr, is_zero_ether_addr(addr), |
8528 | is_broadcast_ether_addr(addr), |
8529 | is_multicast_ether_addr(addr)); |
8530 | return -EINVAL; |
8531 | } |
8532 | |
8533 | memset(&req, 0, sizeof(req)); |
8534 | |
8535 | hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, |
8536 | HCLGE_MAC_EPORT_VFID_S, vport->vport_id); |
8537 | |
8538 | req.egress_port = cpu_to_le16(egress_port); |
8539 | |
	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is nonexistent. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}
8563 | } |
8564 | |
8565 | /* check if we just hit the duplicate */ |
8566 | if (!ret) |
8567 | return -EEXIST; |
8568 | |
8569 | return ret; |
8570 | } |
8571 | |
8572 | static int hclge_rm_uc_addr(struct hnae3_handle *handle, |
8573 | const unsigned char *addr) |
8574 | { |
8575 | struct hclge_vport *vport = hclge_get_vport(handle); |
8576 | |
	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
8579 | } |
8580 | |
8581 | int hclge_rm_uc_addr_common(struct hclge_vport *vport, |
8582 | const unsigned char *addr) |
8583 | { |
8584 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8585 | struct hclge_dev *hdev = vport->back; |
8586 | struct hclge_mac_vlan_tbl_entry_cmd req; |
8587 | int ret; |
8588 | |
8589 | /* mac addr check */ |
8590 | if (is_zero_ether_addr(addr) || |
8591 | is_broadcast_ether_addr(addr) || |
8592 | is_multicast_ether_addr(addr)) { |
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8595 | format_mac_addr); |
8596 | return -EINVAL; |
8597 | } |
8598 | |
8599 | memset(&req, 0, sizeof(req)); |
8600 | hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); |
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret || ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
8607 | return 0; |
8608 | } |
8609 | |
8610 | return ret; |
8611 | } |
8612 | |
8613 | static int hclge_add_mc_addr(struct hnae3_handle *handle, |
8614 | const unsigned char *addr) |
8615 | { |
8616 | struct hclge_vport *vport = hclge_get_vport(handle); |
8617 | |
	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
8620 | } |
8621 | |
8622 | int hclge_add_mc_addr_common(struct hclge_vport *vport, |
8623 | const unsigned char *addr) |
8624 | { |
8625 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8626 | struct hclge_dev *hdev = vport->back; |
8627 | struct hclge_mac_vlan_tbl_entry_cmd req; |
8628 | struct hclge_desc desc[3]; |
8629 | bool is_new_addr = false; |
8630 | int status; |
8631 | |
8632 | /* mac addr check */ |
8633 | if (!is_multicast_ether_addr(addr)) { |
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%s.\n",
8637 | format_mac_addr); |
8638 | return -EINVAL; |
8639 | } |
8640 | memset(&req, 0, sizeof(req)); |
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8643 | if (status) { |
8644 | if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && |
8645 | hdev->used_mc_mac_num >= |
8646 | hdev->ae_dev->dev_specs.mc_mac_size) |
8647 | goto err_no_space; |
8648 | |
8649 | is_new_addr = true; |
8650 | |
		/* This mac addr does not exist, add a new entry for it */
8652 | memset(desc[0].data, 0, sizeof(desc[0].data)); |
8653 | memset(desc[1].data, 0, sizeof(desc[0].data)); |
8654 | memset(desc[2].data, 0, sizeof(desc[0].data)); |
8655 | } |
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8657 | if (status) |
8658 | return status; |
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8660 | if (status == -ENOSPC) |
8661 | goto err_no_space; |
8662 | else if (!status && is_new_addr) |
8663 | hdev->used_mc_mac_num++; |
8664 | |
8665 | return status; |
8666 | |
8667 | err_no_space: |
8668 | /* if already overflow, not to print each time */ |
8669 | if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { |
8670 | vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; |
8671 | dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n" ); |
8672 | } |
8673 | |
8674 | return -ENOSPC; |
8675 | } |
8676 | |
8677 | static int hclge_rm_mc_addr(struct hnae3_handle *handle, |
8678 | const unsigned char *addr) |
8679 | { |
8680 | struct hclge_vport *vport = hclge_get_vport(handle); |
8681 | |
	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
8684 | } |
8685 | |
8686 | int hclge_rm_mc_addr_common(struct hclge_vport *vport, |
8687 | const unsigned char *addr) |
8688 | { |
8689 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8690 | struct hclge_dev *hdev = vport->back; |
8691 | struct hclge_mac_vlan_tbl_entry_cmd req; |
8692 | enum hclge_comm_cmd_status status; |
8693 | struct hclge_desc desc[3]; |
8694 | |
8695 | /* mac addr check */ |
8696 | if (!is_multicast_ether_addr(addr)) { |
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%s.\n",
8700 | format_mac_addr); |
8701 | return -EINVAL; |
8702 | } |
8703 | |
8704 | memset(&req, 0, sizeof(req)); |
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8710 | if (status) |
8711 | return status; |
8712 | |
8713 | if (hclge_is_all_function_id_zero(desc)) { |
8714 | /* All the vfid is zero, so need to delete this entry */ |
			status = hclge_remove_mac_vlan_tbl(vport, &req);
8716 | if (!status) |
8717 | hdev->used_mc_mac_num--; |
8718 | } else { |
8719 | /* Not all the vfid is zero, update the vfid */ |
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8721 | } |
8722 | } else if (status == -ENOENT) { |
8723 | status = 0; |
8724 | } |
8725 | |
8726 | return status; |
8727 | } |
8728 | |
8729 | static void hclge_sync_vport_mac_list(struct hclge_vport *vport, |
8730 | struct list_head *list, |
8731 | enum HCLGE_MAC_ADDR_TYPE mac_type) |
8732 | { |
8733 | int (*sync)(struct hclge_vport *vport, const unsigned char *addr); |
8734 | struct hclge_mac_node *mac_node, *tmp; |
8735 | int ret; |
8736 | |
8737 | if (mac_type == HCLGE_MAC_ADDR_UC) |
8738 | sync = hclge_add_uc_addr_common; |
8739 | else |
8740 | sync = hclge_add_mc_addr_common; |
8741 | |
8742 | list_for_each_entry_safe(mac_node, tmp, list, node) { |
8743 | ret = sync(vport, mac_node->mac_addr); |
8744 | if (!ret) { |
8745 | mac_node->state = HCLGE_MAC_ACTIVE; |
8746 | } else { |
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
8749 | |
8750 | /* If one unicast mac address is existing in hardware, |
8751 | * we need to try whether other unicast mac addresses |
8752 | * are new addresses that can be added. |
8753 | * Multicast mac address can be reusable, even though |
8754 | * there is no space to add new multicast mac address, |
8755 | * we should check whether other mac addresses are |
8756 | * existing in hardware for reuse. |
8757 | */ |
8758 | if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || |
8759 | (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) |
8760 | break; |
8761 | } |
8762 | } |
8763 | } |
8764 | |
8765 | static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, |
8766 | struct list_head *list, |
8767 | enum HCLGE_MAC_ADDR_TYPE mac_type) |
8768 | { |
8769 | int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); |
8770 | struct hclge_mac_node *mac_node, *tmp; |
8771 | int ret; |
8772 | |
8773 | if (mac_type == HCLGE_MAC_ADDR_UC) |
8774 | unsync = hclge_rm_uc_addr_common; |
8775 | else |
8776 | unsync = hclge_rm_mc_addr_common; |
8777 | |
8778 | list_for_each_entry_safe(mac_node, tmp, list, node) { |
8779 | ret = unsync(vport, mac_node->mac_addr); |
8780 | if (!ret || ret == -ENOENT) { |
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
8786 | break; |
8787 | } |
8788 | } |
8789 | } |
8790 | |
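/* Fold the result of a hardware sync back into the vport mac list and
 * report whether every address made it into hardware; the return value
 * drives the overflow promiscuous flags.
 */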
8791 | static bool hclge_sync_from_add_list(struct list_head *add_list, |
8792 | struct list_head *mac_list) |
8793 | { |
8794 | struct hclge_mac_node *mac_node, *tmp, *new_node; |
8795 | bool all_added = true; |
8796 | |
8797 | list_for_each_entry_safe(mac_node, tmp, add_list, node) { |
8798 | if (mac_node->state == HCLGE_MAC_TO_ADD) |
8799 | all_added = false; |
8800 | |
		/* If the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; otherwise it must
		 * be TO_ADD, which means this address has not been added
		 * into the mac table yet, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
8820 | } |
8821 | } |
8822 | |
8823 | return all_added; |
8824 | } |
8825 | |
8826 | static void hclge_sync_from_del_list(struct list_head *del_list, |
8827 | struct list_head *mac_list) |
8828 | { |
8829 | struct hclge_mac_node *mac_node, *tmp, *new_node; |
8830 | |
8831 | list_for_each_entry_safe(mac_node, tmp, del_list, node) { |
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of configuring the mac address. The mac
			 * node state is TO_ADD, and the address is already
			 * in the hardware (because the delete failed), so we
			 * just need to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
8846 | } |
8847 | } |
8848 | } |
8849 | |
8850 | static void hclge_update_overflow_flags(struct hclge_vport *vport, |
8851 | enum HCLGE_MAC_ADDR_TYPE mac_type, |
8852 | bool is_all_added) |
8853 | { |
8854 | if (mac_type == HCLGE_MAC_ADDR_UC) { |
8855 | if (is_all_added) |
8856 | vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; |
		else if (hclge_is_umv_space_full(vport, true))
8858 | vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; |
8859 | } else { |
8860 | if (is_all_added) |
8861 | vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; |
8862 | else |
8863 | vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; |
8864 | } |
8865 | } |
8866 | |
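/* Sync one vport's unicast or multicast list to hardware in three phases:
 * snapshot the pending changes under the spinlock, issue the add/remove
 * commands outside of it, then merge the outcome back under the lock.
 */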
8867 | static void hclge_sync_vport_mac_table(struct hclge_vport *vport, |
8868 | enum HCLGE_MAC_ADDR_TYPE mac_type) |
8869 | { |
8870 | struct hclge_mac_node *mac_node, *tmp, *new_node; |
8871 | struct list_head tmp_add_list, tmp_del_list; |
8872 | struct list_head *list; |
8873 | bool all_added; |
8874 | |
	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back
	 * to the mac_list and retry next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
8922 | } |
8923 | |
8924 | static bool hclge_need_sync_mac_table(struct hclge_vport *vport) |
8925 | { |
8926 | struct hclge_dev *hdev = vport->back; |
8927 | |
8928 | if (test_bit(vport->vport_id, hdev->vport_config_block)) |
8929 | return false; |
8930 | |
	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8932 | return true; |
8933 | |
8934 | return false; |
8935 | } |
8936 | |
8937 | static void hclge_sync_mac_table(struct hclge_dev *hdev) |
8938 | { |
8939 | int i; |
8940 | |
8941 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
8942 | struct hclge_vport *vport = &hdev->vport[i]; |
8943 | |
8944 | if (!hclge_need_sync_mac_table(vport)) |
8945 | continue; |
8946 | |
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8949 | } |
8950 | } |
8951 | |
8952 | static void hclge_build_del_list(struct list_head *list, |
8953 | bool is_del_list, |
8954 | struct list_head *tmp_del_list) |
8955 | { |
8956 | struct hclge_mac_node *mac_cfg, *tmp; |
8957 | |
8958 | list_for_each_entry_safe(mac_cfg, tmp, list, node) { |
8959 | switch (mac_cfg->state) { |
8960 | case HCLGE_MAC_TO_DEL: |
8961 | case HCLGE_MAC_ACTIVE: |
			list_move_tail(&mac_cfg->node, tmp_del_list);
8963 | break; |
8964 | case HCLGE_MAC_TO_ADD: |
8965 | if (is_del_list) { |
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
8968 | } |
8969 | break; |
8970 | } |
8971 | } |
8972 | } |
8973 | |
8974 | static void hclge_unsync_del_list(struct hclge_vport *vport, |
8975 | int (*unsync)(struct hclge_vport *vport, |
8976 | const unsigned char *addr), |
8977 | bool is_del_list, |
8978 | struct list_head *tmp_del_list) |
8979 | { |
8980 | struct hclge_mac_node *mac_cfg, *tmp; |
8981 | int ret; |
8982 | |
8983 | list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { |
8984 | ret = unsync(vport, mac_cfg->mac_addr); |
8985 | if (!ret || ret == -ENOENT) { |
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list, and restore them after
			 * the vf reset finishes.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
8996 | } |
8997 | } else if (is_del_list) { |
8998 | mac_cfg->state = HCLGE_MAC_TO_DEL; |
8999 | } |
9000 | } |
9001 | } |
9002 | |
9003 | void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, |
9004 | enum HCLGE_MAC_ADDR_TYPE mac_type) |
9005 | { |
9006 | int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); |
9007 | struct hclge_dev *hdev = vport->back; |
9008 | struct list_head tmp_del_list, *list; |
9009 | |
9010 | if (mac_type == HCLGE_MAC_ADDR_UC) { |
9011 | list = &vport->uc_mac_list; |
9012 | unsync = hclge_rm_uc_addr_common; |
9013 | } else { |
9014 | list = &vport->mc_mac_list; |
9015 | unsync = hclge_rm_mc_addr_common; |
9016 | } |
9017 | |
	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
9036 | } |
9037 | |
/* remove all mac addresses when uninitializing */
9039 | static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, |
9040 | enum HCLGE_MAC_ADDR_TYPE mac_type) |
9041 | { |
9042 | struct hclge_mac_node *mac_node, *tmp; |
9043 | struct hclge_dev *hdev = vport->back; |
9044 | struct list_head tmp_del_list, *list; |
9045 | |
	INIT_LIST_HEAD(&tmp_del_list);
9047 | |
9048 | list = (mac_type == HCLGE_MAC_ADDR_UC) ? |
9049 | &vport->uc_mac_list : &vport->mc_mac_list; |
9050 | |
	spin_lock_bh(&vport->mac_list_lock);
9052 | |
9053 | list_for_each_entry_safe(mac_node, tmp, list, node) { |
9054 | switch (mac_node->state) { |
9055 | case HCLGE_MAC_TO_DEL: |
9056 | case HCLGE_MAC_ACTIVE: |
			list_move_tail(&mac_node->node, &tmp_del_list);
9058 | break; |
9059 | case HCLGE_MAC_TO_ADD: |
			list_del(&mac_node->node);
			kfree(mac_node);
9062 | break; |
9063 | } |
9064 | } |
9065 | |
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);
9075 | |
9076 | list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { |
		list_del(&mac_node->node);
		kfree(mac_node);
9079 | } |
9080 | } |
9081 | |
9082 | static void hclge_uninit_mac_table(struct hclge_dev *hdev) |
9083 | { |
9084 | struct hclge_vport *vport; |
9085 | int i; |
9086 | |
9087 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
9088 | vport = &hdev->vport[i]; |
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9091 | } |
9092 | } |
9093 | |
9094 | static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, |
9095 | u16 cmdq_resp, u8 resp_code) |
9096 | { |
9097 | #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 |
9098 | #define HCLGE_ETHERTYPE_ALREADY_ADD 1 |
9099 | #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 |
9100 | #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 |
9101 | |
9102 | int return_status; |
9103 | |
9104 | if (cmdq_resp) { |
9105 | dev_err(&hdev->pdev->dev, |
9106 | "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n" , |
9107 | cmdq_resp); |
9108 | return -EIO; |
9109 | } |
9110 | |
9111 | switch (resp_code) { |
9112 | case HCLGE_ETHERTYPE_SUCCESS_ADD: |
9113 | case HCLGE_ETHERTYPE_ALREADY_ADD: |
9114 | return_status = 0; |
9115 | break; |
9116 | case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: |
9117 | dev_err(&hdev->pdev->dev, |
9118 | "add mac ethertype failed for manager table overflow.\n" ); |
9119 | return_status = -EIO; |
9120 | break; |
9121 | case HCLGE_ETHERTYPE_KEY_CONFLICT: |
9122 | dev_err(&hdev->pdev->dev, |
9123 | "add mac ethertype failed for key conflict.\n" ); |
9124 | return_status = -EIO; |
9125 | break; |
9126 | default: |
9127 | dev_err(&hdev->pdev->dev, |
9128 | "add mac ethertype failed for undefined, code=%u.\n" , |
9129 | resp_code); |
9130 | return_status = -EIO; |
9131 | } |
9132 | |
9133 | return return_status; |
9134 | } |
9135 | |
9136 | static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, |
9137 | u8 *mac_addr) |
9138 | { |
9139 | struct hclge_vport *vport = hclge_get_vport(handle); |
9140 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
9141 | struct hclge_dev *hdev = vport->back; |
9142 | |
9143 | vport = hclge_get_vf_vport(hdev, vf); |
9144 | if (!vport) |
9145 | return -EINVAL; |
9146 | |
9147 | hnae3_format_mac_addr(format_mac_addr, mac_addr); |
	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%s) is same as before, no change committed!\n",
			 format_mac_addr);
9152 | return 0; |
9153 | } |
9154 | |
	ether_addr_copy(vport->vf_info.mac, mac_addr);
9156 | |
	/* there is a time window before the PF learns the VF is not alive,
	 * and the mailbox send may fail during it, but that doesn't matter:
	 * the VF will query the MAC again when it reinitializes.
	 */
9161 | if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { |
9162 | dev_info(&hdev->pdev->dev, |
9163 | "MAC of VF %d has been set to %s, and it will be reinitialized!\n" , |
9164 | vf, format_mac_addr); |
9165 | (void)hclge_inform_reset_assert_to_vf(vport); |
9166 | return 0; |
9167 | } |
9168 | |
9169 | dev_info(&hdev->pdev->dev, |
9170 | "MAC of VF %d has been set to %s, will be active after VF reset\n" , |
9171 | vf, format_mac_addr); |
9172 | return 0; |
9173 | } |
9174 | |
9175 | static int hclge_add_mgr_tbl(struct hclge_dev *hdev, |
9176 | const struct hclge_mac_mgr_tbl_entry_cmd *req) |
9177 | { |
9178 | struct hclge_desc desc; |
9179 | u8 resp_code; |
9180 | u16 retval; |
9181 | int ret; |
9182 | |
9183 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); |
9184 | memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); |
9185 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9187 | if (ret) { |
9188 | dev_err(&hdev->pdev->dev, |
9189 | "add mac ethertype failed for cmd_send, ret =%d.\n" , |
9190 | ret); |
9191 | return ret; |
9192 | } |
9193 | |
9194 | resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; |
9195 | retval = le16_to_cpu(desc.retval); |
9196 | |
	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9198 | } |
9199 | |
9200 | static int init_mgr_tbl(struct hclge_dev *hdev) |
9201 | { |
9202 | int ret; |
9203 | int i; |
9204 | |
9205 | for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { |
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9207 | if (ret) { |
9208 | dev_err(&hdev->pdev->dev, |
9209 | "add mac ethertype failed, ret =%d.\n" , |
9210 | ret); |
9211 | return ret; |
9212 | } |
9213 | } |
9214 | |
9215 | return 0; |
9216 | } |
9217 | |
9218 | static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) |
9219 | { |
9220 | struct hclge_vport *vport = hclge_get_vport(handle); |
9221 | struct hclge_dev *hdev = vport->back; |
9222 | |
	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9224 | } |
9225 | |
9226 | int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, |
9227 | const u8 *old_addr, const u8 *new_addr) |
9228 | { |
9229 | struct list_head *list = &vport->uc_mac_list; |
9230 | struct hclge_mac_node *old_node, *new_node; |
9231 | |
	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is at the list head, so the dev
		 * addr is not skipped due to the umv space limitation when
		 * re-adding entries after a global/imp reset, which clears
		 * the mac table in hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9266 | |
9267 | return 0; |
9268 | } |
9269 | |
9270 | static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, |
9271 | bool is_first) |
9272 | { |
9273 | const unsigned char *new_addr = (const unsigned char *)p; |
9274 | struct hclge_vport *vport = hclge_get_vport(handle); |
9275 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
9276 | struct hclge_dev *hdev = vport->back; |
9277 | unsigned char *old_addr = NULL; |
9278 | int ret; |
9279 | |
9280 | /* mac addr check */ |
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
9285 | dev_err(&hdev->pdev->dev, |
9286 | "change uc mac err! invalid mac: %s.\n" , |
9287 | format_mac_addr); |
9288 | return -EINVAL; |
9289 | } |
9290 | |
	ret = hclge_pause_addr_cfg(hdev, new_addr);
9292 | if (ret) { |
9293 | dev_err(&hdev->pdev->dev, |
9294 | "failed to configure mac pause address, ret = %d\n" , |
9295 | ret); |
9296 | return ret; |
9297 | } |
9298 | |
9299 | if (!is_first) |
9300 | old_addr = hdev->hw.mac.mac_addr; |
9301 | |
	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%s, ret = %d\n",
			format_mac_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);
9313 | |
9314 | return ret; |
9315 | } |
	/* we must update the dev addr under spin lock protection, preventing
	 * the dev addr from being removed by the set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);
9323 | |
9324 | return 0; |
9325 | } |
9326 | |
9327 | static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) |
9328 | { |
	struct mii_ioctl_data *data = if_mii(ifr);
9330 | |
9331 | if (!hnae3_dev_phy_imp_supported(hdev)) |
9332 | return -EOPNOTSUPP; |
9333 | |
9334 | switch (cmd) { |
9335 | case SIOCGMIIPHY: |
9336 | data->phy_id = hdev->hw.mac.phy_addr; |
9337 | /* this command reads phy id and register at the same time */ |
9338 | fallthrough; |
9339 | case SIOCGMIIREG: |
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9341 | return 0; |
9342 | |
9343 | case SIOCSMIIREG: |
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9345 | default: |
9346 | return -EOPNOTSUPP; |
9347 | } |
9348 | } |
9349 | |
9350 | static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, |
9351 | int cmd) |
9352 | { |
9353 | struct hclge_vport *vport = hclge_get_vport(handle); |
9354 | struct hclge_dev *hdev = vport->back; |
9355 | |
9356 | switch (cmd) { |
9357 | case SIOCGHWTSTAMP: |
9358 | return hclge_ptp_get_cfg(hdev, ifr); |
9359 | case SIOCSHWTSTAMP: |
9360 | return hclge_ptp_set_cfg(hdev, ifr); |
9361 | default: |
9362 | if (!hdev->hw.mac.phydev) |
9363 | return hclge_mii_ioctl(hdev, ifr, cmd); |
9364 | } |
9365 | |
	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9367 | } |
9368 | |
9369 | static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, |
9370 | bool bypass_en) |
9371 | { |
9372 | struct hclge_port_vlan_filter_bypass_cmd *req; |
9373 | struct hclge_desc desc; |
9374 | int ret; |
9375 | |
9376 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); |
9377 | req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; |
9378 | req->vf_id = vf_id; |
9379 | hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, |
9380 | bypass_en ? 1 : 0); |
9381 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9383 | if (ret) |
9384 | dev_err(&hdev->pdev->dev, |
9385 | "failed to set vport%u port vlan filter bypass state, ret = %d.\n" , |
9386 | vf_id, ret); |
9387 | |
9388 | return ret; |
9389 | } |
9390 | |
9391 | static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, |
9392 | u8 fe_type, bool filter_en, u8 vf_id) |
9393 | { |
9394 | struct hclge_vlan_filter_ctrl_cmd *req; |
9395 | struct hclge_desc desc; |
9396 | int ret; |
9397 | |
9398 | /* read current vlan filter parameter */ |
9399 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); |
9400 | req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; |
9401 | req->vlan_type = vlan_type; |
9402 | req->vf_id = vf_id; |
9403 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9407 | vf_id, ret); |
9408 | return ret; |
9409 | } |
9410 | |
9411 | /* modify and write new config parameter */ |
	hclge_comm_cmd_reuse_desc(&desc, false);
9413 | req->vlan_fe = filter_en ? |
9414 | (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); |
9415 | |
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9419 | vf_id, ret); |
9420 | |
9421 | return ret; |
9422 | } |
9423 | |
9424 | static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) |
9425 | { |
9426 | struct hclge_dev *hdev = vport->back; |
9427 | struct hnae3_ae_dev *ae_dev = hdev->ae_dev; |
9428 | int ret; |
9429 | |
9430 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  enable, vport->vport_id);

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					 HCLGE_FILTER_FE_EGRESS, enable,
					 vport->vport_id);
9438 | if (ret) |
9439 | return ret; |
9440 | |
9441 | if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { |
		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
							!enable);
9444 | } else if (!vport->vport_id) { |
9445 | if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) |
9446 | enable = false; |
9447 | |
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS,
						 enable, 0);
9451 | } |
9452 | |
9453 | return ret; |
9454 | } |
9455 | |
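/* Decide whether this vport's vlan filter should actually be enabled in
 * hardware: the requested state can be overridden by promiscuous-style
 * modes, the port based vlan state, and whether any non-zero vlan is
 * configured at all.
 */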
9456 | static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) |
9457 | { |
9458 | struct hnae3_handle *handle = &vport->nic; |
9459 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
9460 | struct hclge_dev *hdev = vport->back; |
9461 | |
9462 | if (vport->vport_id) { |
9463 | if (vport->port_base_vlan_cfg.state != |
9464 | HNAE3_PORT_BASE_VLAN_DISABLE) |
9465 | return true; |
9466 | |
9467 | if (vport->vf_info.trusted && vport->vf_info.request_uc_en) |
9468 | return false; |
9469 | } else if (handle->netdev_flags & HNAE3_USER_UPE) { |
9470 | return false; |
9471 | } |
9472 | |
9473 | if (!vport->req_vlan_fltr_en) |
9474 | return false; |
9475 | |
9476 | /* compatible with former device, always enable vlan filter */ |
9477 | if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) |
9478 | return true; |
9479 | |
9480 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) |
9481 | if (vlan->vlan_id != 0) |
9482 | return true; |
9483 | |
9484 | return false; |
9485 | } |
9486 | |
9487 | int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) |
9488 | { |
9489 | struct hclge_dev *hdev = vport->back; |
9490 | bool need_en; |
9491 | int ret; |
9492 | |
9493 | mutex_lock(&hdev->vport_lock); |
9494 | |
9495 | vport->req_vlan_fltr_en = request_en; |
9496 | |
9497 | need_en = hclge_need_enable_vport_vlan_filter(vport); |
9498 | if (need_en == vport->cur_vlan_fltr_en) { |
mutex_unlock(&hdev->vport_lock);
9500 | return 0; |
9501 | } |
9502 | |
ret = hclge_set_vport_vlan_filter(vport, need_en);
9504 | if (ret) { |
mutex_unlock(&hdev->vport_lock);
9506 | return ret; |
9507 | } |
9508 | |
9509 | vport->cur_vlan_fltr_en = need_en; |
9510 | |
mutex_unlock(&hdev->vport_lock);
9512 | |
9513 | return 0; |
9514 | } |
9515 | |
9516 | static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) |
9517 | { |
9518 | struct hclge_vport *vport = hclge_get_vport(handle); |
9519 | |
return hclge_enable_vport_vlan_filter(vport, enable);
9521 | } |
9522 | |
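/* The vf vlan filter command carries a per-function vf bitmap that is
 * wider than one descriptor's data area, so the vlan id and the bitmap
 * are spread over two chained descriptors.
 */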
9523 | static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, |
9524 | bool is_kill, u16 vlan, |
9525 | struct hclge_desc *desc) |
9526 | { |
9527 | struct hclge_vlan_filter_vf_cfg_cmd *req0; |
9528 | struct hclge_vlan_filter_vf_cfg_cmd *req1; |
9529 | u8 vf_byte_val; |
9530 | u8 vf_byte_off; |
9531 | int ret; |
9532 | |
9533 | hclge_cmd_setup_basic_desc(&desc[0], |
9534 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); |
9535 | hclge_cmd_setup_basic_desc(&desc[1], |
9536 | HCLGE_OPC_VLAN_FILTER_VF_CFG, false); |
9537 | |
9538 | desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
9539 | |
9540 | vf_byte_off = vfid / 8; |
9541 | vf_byte_val = 1 << (vfid % 8); |
9542 | |
9543 | req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
9544 | req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; |
9545 | |
9546 | req0->vlan_id = cpu_to_le16(vlan); |
9547 | req0->vlan_cfg = is_kill; |
9548 | |
9549 | if (vf_byte_off < HCLGE_MAX_VF_BYTES) |
9550 | req0->vf_bitmap[vf_byte_off] = vf_byte_val; |
9551 | else |
9552 | req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; |
9553 | |
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
dev_err(&hdev->pdev->dev,
"Send vf vlan command fail, ret =%d.\n",
ret);
9559 | return ret; |
9560 | } |
9561 | |
9562 | return 0; |
9563 | } |
9564 | |
9565 | static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, |
9566 | bool is_kill, struct hclge_desc *desc) |
9567 | { |
9568 | struct hclge_vlan_filter_vf_cfg_cmd *req; |
9569 | |
9570 | req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; |
9571 | |
9572 | if (!is_kill) { |
9573 | #define HCLGE_VF_VLAN_NO_ENTRY 2 |
9574 | if (!req->resp_code || req->resp_code == 1) |
9575 | return 0; |
9576 | |
9577 | if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { |
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
9581 | return 0; |
9582 | } |
9583 | |
9584 | dev_err(&hdev->pdev->dev, |
9585 | "Add vf vlan filter fail, ret =%u.\n" , |
9586 | req->resp_code); |
9587 | } else { |
9588 | #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 |
9589 | if (!req->resp_code) |
9590 | return 0; |
9591 | |
/* vf vlan filter is disabled when the vf vlan table is full,
 * so new vlan ids are never added to the vf vlan table.
 * Just return 0 without a warning, to avoid flooding the log
 * during unload.
 */
9597 | if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) |
9598 | return 0; |
9599 | |
9600 | dev_err(&hdev->pdev->dev, |
9601 | "Kill vf vlan filter fail, ret =%u.\n" , |
9602 | req->resp_code); |
9603 | } |
9604 | |
9605 | return -EIO; |
9606 | } |
9607 | |
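/* Write one function's vf vlan table entry and interpret the firmware
 * response, handling the table-full case before sending anything.
 */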
9608 | static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, |
9609 | bool is_kill, u16 vlan) |
9610 | { |
9611 | struct hclge_vport *vport = &hdev->vport[vfid]; |
9612 | struct hclge_desc desc[2]; |
9613 | int ret; |
9614 | |
/* if the vf vlan table is full, firmware will close the vf vlan
 * filter, so it is impossible and unnecessary to add a new vlan id
 * to the vf vlan filter. If spoof check is enabled and the vf vlan
 * table is full, a new vlan must not be added either, because tx
 * packets with these vlan ids would be dropped.
 */
9620 | if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { |
9621 | if (vport->vf_info.spoofchk && vlan) { |
9622 | dev_err(&hdev->pdev->dev, |
9623 | "Can't add vlan due to spoof check is on and vf vlan table is full\n" ); |
9624 | return -EPERM; |
9625 | } |
9626 | return 0; |
9627 | } |
9628 | |
9629 | ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); |
9630 | if (ret) |
9631 | return ret; |
9632 | |
9633 | return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); |
9634 | } |
9635 | |
9636 | static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, |
9637 | u16 vlan_id, bool is_kill) |
9638 | { |
9639 | struct hclge_vlan_filter_pf_cfg_cmd *req; |
9640 | struct hclge_desc desc; |
9641 | u8 vlan_offset_byte_val; |
9642 | u8 vlan_offset_byte; |
9643 | u8 vlan_offset_160; |
9644 | int ret; |
9645 | |
9646 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); |
9647 | |
9648 | vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; |
9649 | vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / |
9650 | HCLGE_VLAN_BYTE_SIZE; |
9651 | vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); |
9652 | |
9653 | req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; |
9654 | req->vlan_offset = vlan_offset_160; |
9655 | req->vlan_cfg = is_kill; |
9656 | req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; |
9657 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9659 | if (ret) |
9660 | dev_err(&hdev->pdev->dev, |
9661 | "port vlan command, send fail, ret =%d.\n" , ret); |
9662 | return ret; |
9663 | } |
9664 | |
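/* The port vlan table only needs rewriting when the vport actually
 * joins or leaves the vlan; repeated adds (e.g. vlan 0 added twice by
 * the 8021q module) and deletes of absent entries are filtered out.
 */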
9665 | static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, |
9666 | u16 vlan_id, bool is_kill) |
9667 | { |
9668 | /* vlan 0 may be added twice when 8021q module is enabled */ |
9669 | if (!is_kill && !vlan_id && |
9670 | test_bit(vport_id, hdev->vlan_table[vlan_id])) |
9671 | return false; |
9672 | |
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_warn(&hdev->pdev->dev,
"Add port vlan failed, vport %u is already in vlan %u\n",
9676 | vport_id, vlan_id); |
9677 | return false; |
9678 | } |
9679 | |
if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_warn(&hdev->pdev->dev,
"Delete port vlan failed, vport %u is not in vlan %u\n",
9684 | vport_id, vlan_id); |
9685 | return false; |
9686 | } |
9687 | |
9688 | return true; |
9689 | } |
9690 | |
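/* Write both vlan tables: the per-function vf table first, then the
 * shared port table, which is only touched when the first vport joins
 * or the last vport leaves the vlan.
 */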
9691 | static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, |
9692 | u16 vport_id, u16 vlan_id, |
9693 | bool is_kill) |
9694 | { |
9695 | u16 vport_idx, vport_num = 0; |
9696 | int ret; |
9697 | |
9698 | if (is_kill && !vlan_id) |
9699 | return 0; |
9700 | |
9701 | if (vlan_id >= VLAN_N_VID) |
9702 | return -EINVAL; |
9703 | |
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %u vport vlan filter config fail, ret =%d.\n",
9708 | vport_id, ret); |
9709 | return ret; |
9710 | } |
9711 | |
9712 | if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) |
9713 | return 0; |
9714 | |
9715 | for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) |
9716 | vport_num++; |
9717 | |
9718 | if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) |
9719 | ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, |
9720 | is_kill); |
9721 | |
9722 | return ret; |
9723 | } |
9724 | |
9725 | static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) |
9726 | { |
9727 | struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; |
9728 | struct hclge_vport_vtag_tx_cfg_cmd *req; |
9729 | struct hclge_dev *hdev = vport->back; |
9730 | struct hclge_desc desc; |
9731 | u16 bmap_index; |
9732 | int status; |
9733 | |
9734 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); |
9735 | |
9736 | req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; |
9737 | req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); |
9738 | req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); |
9739 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, |
9740 | vcfg->accept_tag1 ? 1 : 0); |
9741 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, |
9742 | vcfg->accept_untag1 ? 1 : 0); |
9743 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, |
9744 | vcfg->accept_tag2 ? 1 : 0); |
9745 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, |
9746 | vcfg->accept_untag2 ? 1 : 0); |
9747 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, |
9748 | vcfg->insert_tag1_en ? 1 : 0); |
9749 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, |
9750 | vcfg->insert_tag2_en ? 1 : 0); |
9751 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, |
9752 | vcfg->tag_shift_mode_en ? 1 : 0); |
9753 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); |
9754 | |
9755 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; |
9756 | bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / |
9757 | HCLGE_VF_NUM_PER_BYTE; |
9758 | req->vf_bitmap[bmap_index] = |
9759 | 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); |
9760 | |
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
"Send port txvlan cfg command fail, ret =%d\n",
9765 | status); |
9766 | |
9767 | return status; |
9768 | } |
9769 | |
9770 | static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) |
9771 | { |
9772 | struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; |
9773 | struct hclge_vport_vtag_rx_cfg_cmd *req; |
9774 | struct hclge_dev *hdev = vport->back; |
9775 | struct hclge_desc desc; |
9776 | u16 bmap_index; |
9777 | int status; |
9778 | |
9779 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); |
9780 | |
9781 | req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; |
9782 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, |
9783 | vcfg->strip_tag1_en ? 1 : 0); |
9784 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, |
9785 | vcfg->strip_tag2_en ? 1 : 0); |
9786 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, |
9787 | vcfg->vlan1_vlan_prionly ? 1 : 0); |
9788 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, |
9789 | vcfg->vlan2_vlan_prionly ? 1 : 0); |
9790 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, |
9791 | vcfg->strip_tag1_discard_en ? 1 : 0); |
9792 | hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, |
9793 | vcfg->strip_tag2_discard_en ? 1 : 0); |
9794 | |
9795 | req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; |
9796 | bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / |
9797 | HCLGE_VF_NUM_PER_BYTE; |
9798 | req->vf_bitmap[bmap_index] = |
9799 | 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); |
9800 | |
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
"Send port rxvlan cfg command fail, ret =%d\n",
9805 | status); |
9806 | |
9807 | return status; |
9808 | } |
9809 | |
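/* Set up both the tx and the rx vlan tag offload of a vport to match
 * the given port based vlan state, then write the two configurations
 * to hardware.
 */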
9810 | static int hclge_vlan_offload_cfg(struct hclge_vport *vport, |
9811 | u16 port_base_vlan_state, |
9812 | u16 vlan_tag, u8 qos) |
9813 | { |
9814 | int ret; |
9815 | |
9816 | if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
9817 | vport->txvlan_cfg.accept_tag1 = true; |
9818 | vport->txvlan_cfg.insert_tag1_en = false; |
9819 | vport->txvlan_cfg.default_tag1 = 0; |
9820 | } else { |
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9822 | |
9823 | vport->txvlan_cfg.accept_tag1 = |
9824 | ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; |
9825 | vport->txvlan_cfg.insert_tag1_en = true; |
9826 | vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | |
9827 | vlan_tag; |
9828 | } |
9829 | |
9830 | vport->txvlan_cfg.accept_untag1 = true; |
9831 | |
/* accept_tag2 and accept_untag2 are not supported on
 * pdev revision(0x20); newer revisions support them, but
 * these two fields cannot be configured by the user.
 */
9836 | vport->txvlan_cfg.accept_tag2 = true; |
9837 | vport->txvlan_cfg.accept_untag2 = true; |
9838 | vport->txvlan_cfg.insert_tag2_en = false; |
9839 | vport->txvlan_cfg.default_tag2 = 0; |
9840 | vport->txvlan_cfg.tag_shift_mode_en = true; |
9841 | |
9842 | if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
9843 | vport->rxvlan_cfg.strip_tag1_en = false; |
9844 | vport->rxvlan_cfg.strip_tag2_en = |
9845 | vport->rxvlan_cfg.rx_vlan_offload_en; |
9846 | vport->rxvlan_cfg.strip_tag2_discard_en = false; |
9847 | } else { |
9848 | vport->rxvlan_cfg.strip_tag1_en = |
9849 | vport->rxvlan_cfg.rx_vlan_offload_en; |
9850 | vport->rxvlan_cfg.strip_tag2_en = true; |
9851 | vport->rxvlan_cfg.strip_tag2_discard_en = true; |
9852 | } |
9853 | |
9854 | vport->rxvlan_cfg.strip_tag1_discard_en = false; |
9855 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; |
9856 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; |
9857 | |
9858 | ret = hclge_set_vlan_tx_offload_cfg(vport); |
9859 | if (ret) |
9860 | return ret; |
9861 | |
9862 | return hclge_set_vlan_rx_offload_cfg(vport); |
9863 | } |
9864 | |
9865 | static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) |
9866 | { |
9867 | struct hclge_rx_vlan_type_cfg_cmd *rx_req; |
9868 | struct hclge_tx_vlan_type_cfg_cmd *tx_req; |
9869 | struct hclge_desc desc; |
9870 | int status; |
9871 | |
9872 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); |
9873 | rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; |
9874 | rx_req->ot_fst_vlan_type = |
9875 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); |
9876 | rx_req->ot_sec_vlan_type = |
9877 | cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); |
9878 | rx_req->in_fst_vlan_type = |
9879 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); |
9880 | rx_req->in_sec_vlan_type = |
9881 | cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); |
9882 | |
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
"Send rxvlan protocol type command fail, ret =%d\n",
9887 | status); |
9888 | return status; |
9889 | } |
9890 | |
9891 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); |
9892 | |
9893 | tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; |
9894 | tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); |
9895 | tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); |
9896 | |
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
"Send txvlan protocol type command fail, ret =%d\n",
9901 | status); |
9902 | |
9903 | return status; |
9904 | } |
9905 | |
9906 | static int hclge_init_vlan_filter(struct hclge_dev *hdev) |
9907 | { |
9908 | struct hclge_vport *vport; |
9909 | int ret; |
9910 | int i; |
9911 | |
9912 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS_V1_B,
true, 0);
9916 | |
9917 | /* for revision 0x21, vf vlan filter is per function */ |
9918 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
9919 | vport = &hdev->vport[i]; |
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS, true,
vport->vport_id);
9923 | if (ret) |
9924 | return ret; |
9925 | vport->cur_vlan_fltr_en = true; |
9926 | } |
9927 | |
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
HCLGE_FILTER_FE_INGRESS, true, 0);
9930 | } |
9931 | |
9932 | static int hclge_init_vlan_type(struct hclge_dev *hdev) |
9933 | { |
9934 | hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; |
9935 | hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; |
9936 | hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; |
9937 | hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; |
9938 | hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; |
9939 | hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; |
9940 | |
9941 | return hclge_set_vlan_protocol_type(hdev); |
9942 | } |
9943 | |
9944 | static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) |
9945 | { |
9946 | struct hclge_port_base_vlan_config *cfg; |
9947 | struct hclge_vport *vport; |
9948 | int ret; |
9949 | int i; |
9950 | |
9951 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
9952 | vport = &hdev->vport[i]; |
9953 | cfg = &vport->port_base_vlan_cfg; |
9954 | |
ret = hclge_vlan_offload_cfg(vport, cfg->state,
cfg->vlan_info.vlan_tag,
cfg->vlan_info.qos);
9958 | if (ret) |
9959 | return ret; |
9960 | } |
9961 | return 0; |
9962 | } |
9963 | |
9964 | static int hclge_init_vlan_config(struct hclge_dev *hdev) |
9965 | { |
9966 | struct hnae3_handle *handle = &hdev->vport[0].nic; |
9967 | int ret; |
9968 | |
9969 | ret = hclge_init_vlan_filter(hdev); |
9970 | if (ret) |
9971 | return ret; |
9972 | |
9973 | ret = hclge_init_vlan_type(hdev); |
9974 | if (ret) |
9975 | return ret; |
9976 | |
9977 | ret = hclge_init_vport_vlan_offload(hdev); |
9978 | if (ret) |
9979 | return ret; |
9980 | |
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9982 | } |
9983 | |
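/* Track a vlan id in the vport's software vlan list; hd_tbl_status
 * records whether the id has already been written to the hardware
 * vlan filter table.
 */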
9984 | static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, |
9985 | bool writen_to_tbl) |
9986 | { |
9987 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
9988 | struct hclge_dev *hdev = vport->back; |
9989 | |
9990 | mutex_lock(&hdev->vport_lock); |
9991 | |
9992 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
9993 | if (vlan->vlan_id == vlan_id) { |
mutex_unlock(&hdev->vport_lock);
9995 | return; |
9996 | } |
9997 | } |
9998 | |
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan) {
mutex_unlock(&hdev->vport_lock);
10002 | return; |
10003 | } |
10004 | |
10005 | vlan->hd_tbl_status = writen_to_tbl; |
10006 | vlan->vlan_id = vlan_id; |
10007 | |
list_add_tail(&vlan->node, &vport->vlan_list);
mutex_unlock(&hdev->vport_lock);
10010 | } |
10011 | |
10012 | static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) |
10013 | { |
10014 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
10015 | struct hclge_dev *hdev = vport->back; |
10016 | int ret; |
10017 | |
10018 | mutex_lock(&hdev->vport_lock); |
10019 | |
10020 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
10021 | if (!vlan->hd_tbl_status) { |
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, false);
10025 | if (ret) { |
10026 | dev_err(&hdev->pdev->dev, |
10027 | "restore vport vlan list failed, ret=%d\n" , |
10028 | ret); |
10029 | |
mutex_unlock(&hdev->vport_lock);
10031 | return ret; |
10032 | } |
10033 | } |
10034 | vlan->hd_tbl_status = true; |
10035 | } |
10036 | |
mutex_unlock(&hdev->vport_lock);
10038 | |
10039 | return 0; |
10040 | } |
10041 | |
10042 | static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, |
10043 | bool is_write_tbl) |
10044 | { |
10045 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
10046 | struct hclge_dev *hdev = vport->back; |
10047 | |
10048 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
10049 | if (vlan->vlan_id == vlan_id) { |
10050 | if (is_write_tbl && vlan->hd_tbl_status) |
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan_id,
true);

list_del(&vlan->node);
kfree(vlan);
10059 | break; |
10060 | } |
10061 | } |
10062 | } |
10063 | |
10064 | void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) |
10065 | { |
10066 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
10067 | struct hclge_dev *hdev = vport->back; |
10068 | |
10069 | mutex_lock(&hdev->vport_lock); |
10070 | |
10071 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
10072 | if (vlan->hd_tbl_status) |
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id,
true);
10078 | |
10079 | vlan->hd_tbl_status = false; |
10080 | if (is_del_list) { |
list_del(&vlan->node);
kfree(vlan);
10083 | } |
10084 | } |
clear_bit(vport->vport_id, hdev->vf_vlan_full);
mutex_unlock(&hdev->vport_lock);
10087 | } |
10088 | |
10089 | void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) |
10090 | { |
10091 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
10092 | struct hclge_vport *vport; |
10093 | int i; |
10094 | |
10095 | mutex_lock(&hdev->vport_lock); |
10096 | |
10097 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
10098 | vport = &hdev->vport[i]; |
10099 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
list_del(&vlan->node);
kfree(vlan);
10102 | } |
10103 | } |
10104 | |
mutex_unlock(&hdev->vport_lock);
10106 | } |
10107 | |
10108 | void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) |
10109 | { |
10110 | struct hclge_vlan_info *vlan_info; |
10111 | struct hclge_vport *vport; |
10112 | u16 vlan_proto; |
10113 | u16 vlan_id; |
10114 | u16 state; |
10115 | int vf_id; |
10116 | int ret; |
10117 | |
/* the PF should restore all vfs' port base vlan */
10119 | for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { |
10120 | vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; |
10121 | vlan_info = vport->port_base_vlan_cfg.tbl_sta ? |
10122 | &vport->port_base_vlan_cfg.vlan_info : |
10123 | &vport->port_base_vlan_cfg.old_vlan_info; |
10124 | |
10125 | vlan_id = vlan_info->vlan_tag; |
10126 | vlan_proto = vlan_info->vlan_proto; |
10127 | state = vport->port_base_vlan_cfg.state; |
10128 | |
10129 | if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { |
clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
vport->vport_id,
vlan_id, false);
10134 | vport->port_base_vlan_cfg.tbl_sta = ret == 0; |
10135 | } |
10136 | } |
10137 | } |
10138 | |
10139 | void hclge_restore_vport_vlan_table(struct hclge_vport *vport) |
10140 | { |
10141 | struct hclge_vport_vlan_cfg *vlan, *tmp; |
10142 | struct hclge_dev *hdev = vport->back; |
10143 | int ret; |
10144 | |
10145 | mutex_lock(&hdev->vport_lock); |
10146 | |
10147 | if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
10148 | list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { |
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, false);
10152 | if (ret) |
10153 | break; |
10154 | vlan->hd_tbl_status = true; |
10155 | } |
10156 | } |
10157 | |
mutex_unlock(&hdev->vport_lock);
10159 | } |
10160 | |
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes.
 * Further, the mac addresses with state TO_DEL or DEL_FAIL do not need
 * to be restored after reset, so just remove these mac nodes from
 * mac_list.
 */
10167 | static void hclge_mac_node_convert_for_reset(struct list_head *list) |
10168 | { |
10169 | struct hclge_mac_node *mac_node, *tmp; |
10170 | |
10171 | list_for_each_entry_safe(mac_node, tmp, list, node) { |
10172 | if (mac_node->state == HCLGE_MAC_ACTIVE) { |
10173 | mac_node->state = HCLGE_MAC_TO_ADD; |
10174 | } else if (mac_node->state == HCLGE_MAC_TO_DEL) { |
list_del(&mac_node->node);
kfree(mac_node);
10177 | } |
10178 | } |
10179 | } |
10180 | |
10181 | void hclge_restore_mac_table_common(struct hclge_vport *vport) |
10182 | { |
spin_lock_bh(&vport->mac_list_lock);

hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

spin_unlock_bh(&vport->mac_list_lock);
10190 | } |
10191 | |
10192 | static void hclge_restore_hw_table(struct hclge_dev *hdev) |
10193 | { |
10194 | struct hclge_vport *vport = &hdev->vport[0]; |
10195 | struct hnae3_handle *handle = &vport->nic; |
10196 | |
10197 | hclge_restore_mac_table_common(vport); |
10198 | hclge_restore_vport_port_base_vlan_config(hdev); |
10199 | hclge_restore_vport_vlan_table(vport); |
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10201 | hclge_restore_fd_entries(handle); |
10202 | } |
10203 | |
10204 | int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) |
10205 | { |
10206 | struct hclge_vport *vport = hclge_get_vport(handle); |
10207 | |
10208 | if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
10209 | vport->rxvlan_cfg.strip_tag1_en = false; |
10210 | vport->rxvlan_cfg.strip_tag2_en = enable; |
10211 | vport->rxvlan_cfg.strip_tag2_discard_en = false; |
10212 | } else { |
10213 | vport->rxvlan_cfg.strip_tag1_en = enable; |
10214 | vport->rxvlan_cfg.strip_tag2_en = true; |
10215 | vport->rxvlan_cfg.strip_tag2_discard_en = true; |
10216 | } |
10217 | |
10218 | vport->rxvlan_cfg.strip_tag1_discard_en = false; |
10219 | vport->rxvlan_cfg.vlan1_vlan_prionly = false; |
10220 | vport->rxvlan_cfg.vlan2_vlan_prionly = false; |
10221 | vport->rxvlan_cfg.rx_vlan_offload_en = enable; |
10222 | |
10223 | return hclge_set_vlan_rx_offload_cfg(vport); |
10224 | } |
10225 | |
10226 | static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) |
10227 | { |
10228 | struct hclge_dev *hdev = vport->back; |
10229 | |
10230 | if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) |
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10232 | } |
10233 | |
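/* Switch the hardware vlan filter between the port based vlan entry
 * and the entries from the vport's vlan list when the port based vlan
 * state flips between enabled and disabled.
 */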
10234 | static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, |
10235 | u16 port_base_vlan_state, |
10236 | struct hclge_vlan_info *new_info, |
10237 | struct hclge_vlan_info *old_info) |
10238 | { |
10239 | struct hclge_dev *hdev = vport->back; |
10240 | int ret; |
10241 | |
10242 | if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { |
hclge_rm_vport_all_vlan_table(vport, false);
/* force clear VLAN 0 */
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
if (ret)
return ret;
return hclge_set_vlan_filter_hw(hdev,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
false);
10253 | } |
10254 | |
10255 | vport->port_base_vlan_cfg.tbl_sta = false; |
10256 | |
10257 | /* force add VLAN 0 */ |
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10259 | if (ret) |
10260 | return ret; |
10261 | |
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
true);
10265 | if (ret) |
10266 | return ret; |
10267 | |
10268 | return hclge_add_vport_all_vlan_table(vport); |
10269 | } |
10270 | |
10271 | static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, |
10272 | const struct hclge_vlan_info *old_cfg) |
10273 | { |
10274 | if (new_cfg->vlan_tag != old_cfg->vlan_tag) |
10275 | return true; |
10276 | |
10277 | if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) |
10278 | return true; |
10279 | |
10280 | return false; |
10281 | } |
10282 | |
10283 | static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, |
10284 | struct hclge_vlan_info *new_info, |
10285 | struct hclge_vlan_info *old_info) |
10286 | { |
10287 | struct hclge_dev *hdev = vport->back; |
10288 | int ret; |
10289 | |
10290 | /* add new VLAN tag */ |
ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
vport->vport_id, new_info->vlan_tag,
false);
10294 | if (ret) |
10295 | return ret; |
10296 | |
10297 | vport->port_base_vlan_cfg.tbl_sta = false; |
10298 | /* remove old VLAN tag */ |
10299 | if (old_info->vlan_tag == 0) |
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
true, 0);
else
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
old_info->vlan_tag, true);
10306 | if (ret) |
10307 | dev_err(&hdev->pdev->dev, |
10308 | "failed to clear vport%u port base vlan %u, ret = %d.\n" , |
10309 | vport->vport_id, old_info->vlan_tag, ret); |
10310 | |
10311 | return ret; |
10312 | } |
10313 | |
10314 | int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, |
10315 | struct hclge_vlan_info *vlan_info) |
10316 | { |
10317 | struct hnae3_handle *nic = &vport->nic; |
10318 | struct hclge_vlan_info *old_vlan_info; |
10319 | int ret; |
10320 | |
10321 | old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; |
10322 | |
ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
vlan_info->qos);
10325 | if (ret) |
10326 | return ret; |
10327 | |
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10329 | goto out; |
10330 | |
10331 | if (state == HNAE3_PORT_BASE_VLAN_MODIFY) |
ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
old_vlan_info);
else
ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
old_vlan_info);
10337 | if (ret) |
10338 | return ret; |
10339 | |
10340 | out: |
10341 | vport->port_base_vlan_cfg.state = state; |
10342 | if (state == HNAE3_PORT_BASE_VLAN_DISABLE) |
10343 | nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; |
10344 | else |
10345 | nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; |
10346 | |
10347 | vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; |
10348 | vport->port_base_vlan_cfg.vlan_info = *vlan_info; |
10349 | vport->port_base_vlan_cfg.tbl_sta = true; |
10350 | hclge_set_vport_vlan_fltr_change(vport); |
10351 | |
10352 | return 0; |
10353 | } |
10354 | |
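/* Map the requested vlan/qos pair onto a port based vlan state
 * transition: disabled -> enabled when a tag appears, enabled ->
 * disabled when it is cleared, modify when the tag changes, and
 * nochange when nothing differs.
 */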
10355 | static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, |
10356 | enum hnae3_port_base_vlan_state state, |
10357 | u16 vlan, u8 qos) |
10358 | { |
10359 | if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
10360 | if (!vlan && !qos) |
10361 | return HNAE3_PORT_BASE_VLAN_NOCHANGE; |
10362 | |
10363 | return HNAE3_PORT_BASE_VLAN_ENABLE; |
10364 | } |
10365 | |
10366 | if (!vlan && !qos) |
10367 | return HNAE3_PORT_BASE_VLAN_DISABLE; |
10368 | |
10369 | if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && |
10370 | vport->port_base_vlan_cfg.vlan_info.qos == qos) |
10371 | return HNAE3_PORT_BASE_VLAN_NOCHANGE; |
10372 | |
10373 | return HNAE3_PORT_BASE_VLAN_MODIFY; |
10374 | } |
10375 | |
10376 | static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, |
10377 | u16 vlan, u8 qos, __be16 proto) |
10378 | { |
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10380 | struct hclge_vport *vport = hclge_get_vport(handle); |
10381 | struct hclge_dev *hdev = vport->back; |
10382 | struct hclge_vlan_info vlan_info; |
10383 | u16 state; |
10384 | int ret; |
10385 | |
10386 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
10387 | return -EOPNOTSUPP; |
10388 | |
vport = hclge_get_vf_vport(hdev, vfid);
10390 | if (!vport) |
10391 | return -EINVAL; |
10392 | |
/* qos is a 3-bit value, so it cannot be bigger than 7 */
10394 | if (vlan > VLAN_N_VID - 1 || qos > 7) |
10395 | return -EINVAL; |
10396 | if (proto != htons(ETH_P_8021Q)) |
10397 | return -EPROTONOSUPPORT; |
10398 | |
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
vlan, qos);
10402 | if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) |
10403 | return 0; |
10404 | |
10405 | vlan_info.vlan_tag = vlan; |
10406 | vlan_info.qos = qos; |
10407 | vlan_info.vlan_proto = ntohs(proto); |
10408 | |
ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10410 | if (ret) { |
10411 | dev_err(&hdev->pdev->dev, |
10412 | "failed to update port base vlan for vf %d, ret = %d\n" , |
10413 | vfid, ret); |
10414 | return ret; |
10415 | } |
10416 | |
/* There is a time window before the PF learns that the VF is no
 * longer alive, so sending the mailbox message may fail. That doesn't
 * matter: the VF will query the state again when it reinitializes.
 * For DEVICE_VERSION_V3, the vf doesn't need to know about the port
 * based VLAN state.
 */
10423 | if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { |
10424 | if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) |
(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
vport->vport_id,
state,
&vlan_info);
else
set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
&vport->need_notify);
10432 | } |
10433 | return 0; |
10434 | } |
10435 | |
10436 | static void hclge_clear_vf_vlan(struct hclge_dev *hdev) |
10437 | { |
10438 | struct hclge_vlan_info *vlan_info; |
10439 | struct hclge_vport *vport; |
10440 | int ret; |
10441 | int vf; |
10442 | |
/* clear port base vlan for all vfs */
10444 | for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { |
10445 | vport = &hdev->vport[vf]; |
10446 | vlan_info = &vport->port_base_vlan_cfg.vlan_info; |
10447 | |
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan_info->vlan_tag, true);
10451 | if (ret) |
10452 | dev_err(&hdev->pdev->dev, |
10453 | "failed to clear vf vlan for vf%d, ret = %d\n" , |
10454 | vf - HCLGE_VF_VPORT_START_NUM, ret); |
10455 | } |
10456 | } |
10457 | |
10458 | int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, |
10459 | u16 vlan_id, bool is_kill) |
10460 | { |
10461 | struct hclge_vport *vport = hclge_get_vport(handle); |
10462 | struct hclge_dev *hdev = vport->back; |
10463 | bool writen_to_tbl = false; |
10464 | int ret = 0; |
10465 | |
/* When the device is resetting or the reset has failed, the firmware
 * is unable to handle the mailbox. Just record the vlan id, and
 * remove it after the reset finishes.
 */
10470 | mutex_lock(&hdev->vport_lock); |
10471 | if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || |
10472 | test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { |
set_bit(vlan_id, vport->vlan_del_fail_bmap);
mutex_unlock(&hdev->vport_lock);
10475 | return -EBUSY; |
10476 | } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { |
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10478 | } |
mutex_unlock(&hdev->vport_lock);
10480 | |
/* when port base vlan is enabled, we use the port base vlan as the
 * vlan filter entry. In this case, we don't update the vlan filter
 * table when the user adds a new vlan or removes an existing vlan;
 * we just update the vport vlan list. The vlan ids in the vlan list
 * will be written into the vlan filter table once port base vlan is
 * disabled.
 */
10487 | if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { |
10488 | ret = hclge_set_vlan_filter_hw(hdev, proto, vport_id: vport->vport_id, |
10489 | vlan_id, is_kill); |
10490 | writen_to_tbl = true; |
10491 | } |
10492 | |
10493 | if (!ret) { |
10494 | if (!is_kill) { |
10495 | hclge_add_vport_vlan_table(vport, vlan_id, |
10496 | writen_to_tbl); |
10497 | } else if (is_kill && vlan_id != 0) { |
10498 | mutex_lock(&hdev->vport_lock); |
hclge_rm_vport_vlan_table(vport, vlan_id, false);
mutex_unlock(&hdev->vport_lock);
10501 | } |
10502 | } else if (is_kill) { |
/* when removing the hw vlan filter failed, record the vlan id,
 * and try to remove it from hw later, to stay consistent
 * with the stack
 */
10507 | mutex_lock(&hdev->vport_lock); |
set_bit(vlan_id, vport->vlan_del_fail_bmap);
mutex_unlock(&hdev->vport_lock);
10510 | } |
10511 | |
10512 | hclge_set_vport_vlan_fltr_change(vport); |
10513 | |
10514 | return ret; |
10515 | } |
10516 | |
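/* Service task helper: re-evaluate the vlan filter switch for every
 * vport flagged with VLAN_FLTR_CHANGE, restoring the flag on failure
 * so the next round retries.
 */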
10517 | static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) |
10518 | { |
10519 | struct hclge_vport *vport; |
10520 | int ret; |
10521 | u16 i; |
10522 | |
10523 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
10524 | vport = &hdev->vport[i]; |
if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state))
10527 | continue; |
10528 | |
ret = hclge_enable_vport_vlan_filter(vport,
vport->req_vlan_fltr_en);
10531 | if (ret) { |
10532 | dev_err(&hdev->pdev->dev, |
10533 | "failed to sync vlan filter state for vport%u, ret = %d\n" , |
10534 | vport->vport_id, ret); |
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
10537 | return; |
10538 | } |
10539 | } |
10540 | } |
10541 | |
10542 | static void hclge_sync_vlan_filter(struct hclge_dev *hdev) |
10543 | { |
10544 | #define HCLGE_MAX_SYNC_COUNT 60 |
10545 | |
10546 | int i, ret, sync_cnt = 0; |
10547 | u16 vlan_id; |
10548 | |
10549 | mutex_lock(&hdev->vport_lock); |
/* start from vport 1, for the PF is always alive */
10551 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
10552 | struct hclge_vport *vport = &hdev->vport[i]; |
10553 | |
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
10556 | while (vlan_id != VLAN_N_VID) { |
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
true);
10560 | if (ret && ret != -EINVAL) { |
mutex_unlock(&hdev->vport_lock);
10562 | return; |
10563 | } |
10564 | |
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
10567 | hclge_set_vport_vlan_fltr_change(vport); |
10568 | |
10569 | sync_cnt++; |
10570 | if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { |
mutex_unlock(&hdev->vport_lock);
10572 | return; |
10573 | } |
10574 | |
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
10577 | } |
10578 | } |
mutex_unlock(&hdev->vport_lock);
10580 | |
10581 | hclge_sync_vlan_fltr_state(hdev); |
10582 | } |
10583 | |
10584 | static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) |
10585 | { |
10586 | struct hclge_config_max_frm_size_cmd *req; |
10587 | struct hclge_desc desc; |
10588 | |
10589 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); |
10590 | |
10591 | req = (struct hclge_config_max_frm_size_cmd *)desc.data; |
10592 | req->max_frm_size = cpu_to_le16(new_mps); |
10593 | req->min_frm_size = HCLGE_MAC_MIN_FRAME; |
10594 | |
return hclge_cmd_send(&hdev->hw, &desc, 1);
10596 | } |
10597 | |
10598 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
10599 | { |
10600 | struct hclge_vport *vport = hclge_get_vport(handle); |
10601 | |
10602 | return hclge_set_vport_mtu(vport, new_mtu); |
10603 | } |
10604 | |
10605 | int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) |
10606 | { |
10607 | struct hclge_dev *hdev = vport->back; |
10608 | int i, max_frm_size, ret; |
10609 | |
/* HW supports two layers of vlan */
10611 | max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
10612 | if (max_frm_size < HCLGE_MAC_MIN_FRAME || |
10613 | max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) |
10614 | return -EINVAL; |
10615 | |
10616 | max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); |
10617 | mutex_lock(&hdev->vport_lock); |
10618 | /* VF's mps must fit within hdev->mps */ |
10619 | if (vport->vport_id && max_frm_size > hdev->mps) { |
mutex_unlock(&hdev->vport_lock);
10621 | return -EINVAL; |
10622 | } else if (vport->vport_id) { |
10623 | vport->mps = max_frm_size; |
mutex_unlock(&hdev->vport_lock);
10625 | return 0; |
10626 | } |
10627 | |
/* PF's mps must be no smaller than any VF's mps */
10629 | for (i = 1; i < hdev->num_alloc_vport; i++) |
10630 | if (max_frm_size < hdev->vport[i].mps) { |
10631 | dev_err(&hdev->pdev->dev, |
10632 | "failed to set pf mtu for less than vport %d, mps = %u.\n" , |
10633 | i, hdev->vport[i].mps); |
mutex_unlock(&hdev->vport_lock);
10635 | return -EINVAL; |
10636 | } |
10637 | |
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10639 | |
ret = hclge_set_mac_mtu(hdev, max_frm_size);
10641 | if (ret) { |
10642 | dev_err(&hdev->pdev->dev, |
10643 | "Change mtu fail, ret =%d\n" , ret); |
10644 | goto out; |
10645 | } |
10646 | |
10647 | hdev->mps = max_frm_size; |
10648 | vport->mps = max_frm_size; |
10649 | |
10650 | ret = hclge_buffer_alloc(hdev); |
10651 | if (ret) |
10652 | dev_err(&hdev->pdev->dev, |
10653 | "Allocate buffer fail, ret =%d\n" , ret); |
10654 | |
10655 | out: |
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
mutex_unlock(&hdev->vport_lock);
10658 | return ret; |
10659 | } |
10660 | |
10661 | static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, |
10662 | bool enable) |
10663 | { |
10664 | struct hclge_reset_tqp_queue_cmd *req; |
10665 | struct hclge_desc desc; |
10666 | int ret; |
10667 | |
10668 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); |
10669 | |
10670 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
10671 | req->tqp_id = cpu_to_le16(queue_id); |
10672 | if (enable) |
10673 | hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); |
10674 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10676 | if (ret) { |
10677 | dev_err(&hdev->pdev->dev, |
10678 | "Send tqp reset cmd error, status =%d\n" , ret); |
10679 | return ret; |
10680 | } |
10681 | |
10682 | return 0; |
10683 | } |
10684 | |
10685 | static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, |
10686 | u8 *reset_status) |
10687 | { |
10688 | struct hclge_reset_tqp_queue_cmd *req; |
10689 | struct hclge_desc desc; |
10690 | int ret; |
10691 | |
10692 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); |
10693 | |
10694 | req = (struct hclge_reset_tqp_queue_cmd *)desc.data; |
10695 | req->tqp_id = cpu_to_le16(queue_id); |
10696 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10698 | if (ret) { |
10699 | dev_err(&hdev->pdev->dev, |
10700 | "Get reset status error, status =%d\n" , ret); |
10701 | return ret; |
10702 | } |
10703 | |
10704 | *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); |
10705 | |
10706 | return 0; |
10707 | } |
10708 | |
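/* Convert a queue id that is local to the handle into the global tqp
 * index used by the firmware commands.
 */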
10709 | u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) |
10710 | { |
10711 | struct hclge_comm_tqp *tqp; |
10712 | struct hnae3_queue *queue; |
10713 | |
10714 | queue = handle->kinfo.tqp[queue_id]; |
10715 | tqp = container_of(queue, struct hclge_comm_tqp, q); |
10716 | |
10717 | return tqp->index; |
10718 | } |
10719 | |
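/* Reset each tqp one by one: trigger the soft reset, poll the ready
 * status up to HCLGE_TQP_RESET_TRY_TIMES times, then deassert the
 * reset again.
 */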
10720 | static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) |
10721 | { |
10722 | struct hclge_vport *vport = hclge_get_vport(handle); |
10723 | struct hclge_dev *hdev = vport->back; |
10724 | u16 reset_try_times = 0; |
10725 | u8 reset_status; |
10726 | u16 queue_gid; |
10727 | int ret; |
10728 | u16 i; |
10729 | |
10730 | for (i = 0; i < handle->kinfo.num_tqps; i++) { |
queue_gid = hclge_covert_handle_qid_global(handle, i);
ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10733 | if (ret) { |
10734 | dev_err(&hdev->pdev->dev, |
10735 | "failed to send reset tqp cmd, ret = %d\n" , |
10736 | ret); |
10737 | return ret; |
10738 | } |
10739 | |
10740 | while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { |
ret = hclge_get_reset_status(hdev, queue_gid,
&reset_status);
10743 | if (ret) |
10744 | return ret; |
10745 | |
10746 | if (reset_status) |
10747 | break; |
10748 | |
10749 | /* Wait for tqp hw reset */ |
usleep_range(1000, 1200);
10751 | } |
10752 | |
10753 | if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { |
10754 | dev_err(&hdev->pdev->dev, |
10755 | "wait for tqp hw reset timeout\n" ); |
10756 | return -ETIME; |
10757 | } |
10758 | |
ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10760 | if (ret) { |
10761 | dev_err(&hdev->pdev->dev, |
10762 | "failed to deassert soft reset, ret = %d\n" , |
10763 | ret); |
10764 | return ret; |
10765 | } |
10766 | reset_try_times = 0; |
10767 | } |
10768 | return 0; |
10769 | } |
10770 | |
10771 | static int hclge_reset_rcb(struct hnae3_handle *handle) |
10772 | { |
10773 | #define HCLGE_RESET_RCB_NOT_SUPPORT 0U |
10774 | #define HCLGE_RESET_RCB_SUCCESS 1U |
10775 | |
10776 | struct hclge_vport *vport = hclge_get_vport(handle); |
10777 | struct hclge_dev *hdev = vport->back; |
10778 | struct hclge_reset_cmd *req; |
10779 | struct hclge_desc desc; |
10780 | u8 return_status; |
10781 | u16 queue_gid; |
10782 | int ret; |
10783 | |
queue_gid = hclge_covert_handle_qid_global(handle, 0);
10785 | |
10786 | req = (struct hclge_reset_cmd *)desc.data; |
10787 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); |
10788 | hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); |
10789 | req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); |
10790 | req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); |
10791 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10793 | if (ret) { |
10794 | dev_err(&hdev->pdev->dev, |
10795 | "failed to send rcb reset cmd, ret = %d\n" , ret); |
10796 | return ret; |
10797 | } |
10798 | |
10799 | return_status = req->fun_reset_rcb_return_status; |
10800 | if (return_status == HCLGE_RESET_RCB_SUCCESS) |
10801 | return 0; |
10802 | |
10803 | if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { |
10804 | dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n" , |
10805 | return_status); |
10806 | return -EIO; |
10807 | } |
10808 | |
10809 | /* if reset rcb cmd is unsupported, we need to send reset tqp cmd |
10810 | * again to reset all tqps |
10811 | */ |
10812 | return hclge_reset_tqp_cmd(handle); |
10813 | } |
10814 | |
10815 | int hclge_reset_tqp(struct hnae3_handle *handle) |
10816 | { |
10817 | struct hclge_vport *vport = hclge_get_vport(handle); |
10818 | struct hclge_dev *hdev = vport->back; |
10819 | int ret; |
10820 | |
10821 | /* only need to disable PF's tqp */ |
10822 | if (!vport->vport_id) { |
ret = hclge_tqp_enable(handle, false);
10824 | if (ret) { |
10825 | dev_err(&hdev->pdev->dev, |
10826 | "failed to disable tqp, ret = %d\n" , ret); |
10827 | return ret; |
10828 | } |
10829 | } |
10830 | |
10831 | return hclge_reset_rcb(handle); |
10832 | } |
10833 | |
10834 | static u32 hclge_get_fw_version(struct hnae3_handle *handle) |
10835 | { |
10836 | struct hclge_vport *vport = hclge_get_vport(handle); |
10837 | struct hclge_dev *hdev = vport->back; |
10838 | |
10839 | return hdev->fw_version; |
10840 | } |
10841 | |
10842 | static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
10843 | { |
10844 | struct phy_device *phydev = hdev->hw.mac.phydev; |
10845 | |
10846 | if (!phydev) |
10847 | return; |
10848 | |
phy_set_asym_pause(phydev, rx_en, tx_en);
10850 | } |
10851 | |
10852 | static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) |
10853 | { |
10854 | int ret; |
10855 | |
10856 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) |
10857 | return 0; |
10858 | |
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10860 | if (ret) |
10861 | dev_err(&hdev->pdev->dev, |
10862 | "configure pauseparam error, ret = %d.\n" , ret); |
10863 | |
10864 | return ret; |
10865 | } |
10866 | |
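/* Resolve the pause configuration from the PHY autoneg result (local
 * and link partner advertisements) and apply it to the MAC. Without
 * autoneg, the current fc_mode is programmed directly.
 */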
10867 | int hclge_cfg_flowctrl(struct hclge_dev *hdev) |
10868 | { |
10869 | struct phy_device *phydev = hdev->hw.mac.phydev; |
10870 | u16 remote_advertising = 0; |
10871 | u16 local_advertising; |
10872 | u32 rx_pause, tx_pause; |
10873 | u8 flowctl; |
10874 | |
10875 | if (!phydev->link) |
10876 | return 0; |
10877 | |
10878 | if (!phydev->autoneg) |
10879 | return hclge_mac_pause_setup_hw(hdev); |
10880 | |
local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10882 | |
10883 | if (phydev->pause) |
10884 | remote_advertising = LPA_PAUSE_CAP; |
10885 | |
10886 | if (phydev->asym_pause) |
10887 | remote_advertising |= LPA_PAUSE_ASYM; |
10888 | |
flowctl = mii_resolve_flowctrl_fdx(local_advertising,
remote_advertising);
10891 | tx_pause = flowctl & FLOW_CTRL_TX; |
10892 | rx_pause = flowctl & FLOW_CTRL_RX; |
10893 | |
10894 | if (phydev->duplex == HCLGE_MAC_HALF) { |
10895 | tx_pause = 0; |
10896 | rx_pause = 0; |
10897 | } |
10898 | |
return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10900 | } |
10901 | |
10902 | static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, |
10903 | u32 *rx_en, u32 *tx_en) |
10904 | { |
10905 | struct hclge_vport *vport = hclge_get_vport(handle); |
10906 | struct hclge_dev *hdev = vport->back; |
10907 | u8 media_type = hdev->hw.mac.media_type; |
10908 | |
10909 | *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? |
10910 | hclge_get_autoneg(handle) : 0; |
10911 | |
10912 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { |
10913 | *rx_en = 0; |
10914 | *tx_en = 0; |
10915 | return; |
10916 | } |
10917 | |
10918 | if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { |
10919 | *rx_en = 1; |
10920 | *tx_en = 0; |
10921 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { |
10922 | *tx_en = 1; |
10923 | *rx_en = 0; |
10924 | } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { |
10925 | *rx_en = 1; |
10926 | *tx_en = 1; |
10927 | } else { |
10928 | *rx_en = 0; |
10929 | *tx_en = 0; |
10930 | } |
10931 | } |
10932 | |
10933 | static void hclge_record_user_pauseparam(struct hclge_dev *hdev, |
10934 | u32 rx_en, u32 tx_en) |
10935 | { |
10936 | if (rx_en && tx_en) |
10937 | hdev->fc_mode_last_time = HCLGE_FC_FULL; |
10938 | else if (rx_en && !tx_en) |
10939 | hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; |
10940 | else if (!rx_en && tx_en) |
10941 | hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; |
10942 | else |
10943 | hdev->fc_mode_last_time = HCLGE_FC_NONE; |
10944 | |
10945 | hdev->tm_info.fc_mode = hdev->fc_mode_last_time; |
10946 | } |
10947 | |
10948 | static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, |
10949 | u32 rx_en, u32 tx_en) |
10950 | { |
10951 | struct hclge_vport *vport = hclge_get_vport(handle); |
10952 | struct hclge_dev *hdev = vport->back; |
10953 | struct phy_device *phydev = hdev->hw.mac.phydev; |
10954 | u32 fc_autoneg; |
10955 | |
10956 | if (phydev || hnae3_dev_phy_imp_supported(hdev)) { |
10957 | fc_autoneg = hclge_get_autoneg(handle); |
10958 | if (auto_neg != fc_autoneg) { |
10959 | dev_info(&hdev->pdev->dev, |
10960 | "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n" ); |
10961 | return -EOPNOTSUPP; |
10962 | } |
10963 | } |
10964 | |
10965 | if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { |
10966 | dev_info(&hdev->pdev->dev, |
10967 | "Priority flow control enabled. Cannot set link flow control.\n" ); |
10968 | return -EOPNOTSUPP; |
10969 | } |
10970 | |
10971 | hclge_set_flowctrl_adv(hdev, rx_en, tx_en); |
10972 | |
10973 | hclge_record_user_pauseparam(hdev, rx_en, tx_en); |
10974 | |
10975 | if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) |
10976 | return hclge_cfg_pauseparam(hdev, rx_en, tx_en); |
10977 | |
10978 | if (phydev) |
10979 | return phy_start_aneg(phydev); |
10980 | |
10981 | return -EOPNOTSUPP; |
10982 | } |
10983 | |
10984 | static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, |
10985 | u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) |
10986 | { |
10987 | struct hclge_vport *vport = hclge_get_vport(handle); |
10988 | struct hclge_dev *hdev = vport->back; |
10989 | |
10990 | if (speed) |
10991 | *speed = hdev->hw.mac.speed; |
10992 | if (duplex) |
10993 | *duplex = hdev->hw.mac.duplex; |
10994 | if (auto_neg) |
10995 | *auto_neg = hdev->hw.mac.autoneg; |
10996 | if (lane_num) |
10997 | *lane_num = hdev->hw.mac.lane_num; |
10998 | } |
10999 | |
11000 | static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, |
11001 | u8 *module_type) |
11002 | { |
11003 | struct hclge_vport *vport = hclge_get_vport(handle); |
11004 | struct hclge_dev *hdev = vport->back; |
11005 | |
/* When the nic is down, the service task is not running and does not
 * update the port information every second. Query the port
 * information before returning the media type, to ensure the media
 * information is correct.
 */
11010 | hclge_update_port_info(hdev); |
11011 | |
11012 | if (media_type) |
11013 | *media_type = hdev->hw.mac.media_type; |
11014 | |
11015 | if (module_type) |
11016 | *module_type = hdev->hw.mac.module_type; |
11017 | } |
11018 | |
11019 | static void hclge_get_mdix_mode(struct hnae3_handle *handle, |
11020 | u8 *tp_mdix_ctrl, u8 *tp_mdix) |
11021 | { |
11022 | struct hclge_vport *vport = hclge_get_vport(handle); |
11023 | struct hclge_dev *hdev = vport->back; |
11024 | struct phy_device *phydev = hdev->hw.mac.phydev; |
11025 | int mdix_ctrl, mdix, is_resolved; |
11026 | unsigned int retval; |
11027 | |
11028 | if (!phydev) { |
11029 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; |
11030 | *tp_mdix = ETH_TP_MDI_INVALID; |
11031 | return; |
11032 | } |
11033 | |
11034 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); |
11035 | |
11036 | retval = phy_read(phydev, HCLGE_PHY_CSC_REG); |
11037 | mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, |
11038 | HCLGE_PHY_MDIX_CTRL_S); |
11039 | |
11040 | retval = phy_read(phydev, HCLGE_PHY_CSS_REG); |
11041 | mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); |
11042 | is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); |
11043 | |
11044 | phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); |
11045 | |
11046 | switch (mdix_ctrl) { |
11047 | case 0x0: |
11048 | *tp_mdix_ctrl = ETH_TP_MDI; |
11049 | break; |
11050 | case 0x1: |
11051 | *tp_mdix_ctrl = ETH_TP_MDI_X; |
11052 | break; |
11053 | case 0x3: |
11054 | *tp_mdix_ctrl = ETH_TP_MDI_AUTO; |
11055 | break; |
11056 | default: |
11057 | *tp_mdix_ctrl = ETH_TP_MDI_INVALID; |
11058 | break; |
11059 | } |
11060 | |
11061 | if (!is_resolved) |
11062 | *tp_mdix = ETH_TP_MDI_INVALID; |
11063 | else if (mdix) |
11064 | *tp_mdix = ETH_TP_MDI_X; |
11065 | else |
11066 | *tp_mdix = ETH_TP_MDI; |
11067 | } |
11068 | |
11069 | static void hclge_info_show(struct hclge_dev *hdev) |
11070 | { |
11071 | struct hnae3_handle *handle = &hdev->vport->nic; |
11072 | struct device *dev = &hdev->pdev->dev; |
11073 | |
11074 | dev_info(dev, "PF info begin:\n" ); |
11075 | |
11076 | dev_info(dev, "Task queue pairs numbers: %u\n" , hdev->num_tqps); |
11077 | dev_info(dev, "Desc num per TX queue: %u\n" , hdev->num_tx_desc); |
11078 | dev_info(dev, "Desc num per RX queue: %u\n" , hdev->num_rx_desc); |
11079 | dev_info(dev, "Numbers of vports: %u\n" , hdev->num_alloc_vport); |
11080 | dev_info(dev, "Numbers of VF for this PF: %u\n" , hdev->num_req_vfs); |
11081 | dev_info(dev, "HW tc map: 0x%x\n" , hdev->hw_tc_map); |
11082 | dev_info(dev, "Total buffer size for TX/RX: %u\n" , hdev->pkt_buf_size); |
11083 | dev_info(dev, "TX buffer size for each TC: %u\n" , hdev->tx_buf_size); |
11084 | dev_info(dev, "DV buffer size for each TC: %u\n" , hdev->dv_buf_size); |
11085 | dev_info(dev, "This is %s PF\n" , |
11086 | hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main" ); |
11087 | dev_info(dev, "DCB %s\n" , |
11088 | handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable" ); |
11089 | dev_info(dev, "MQPRIO %s\n" , |
11090 | handle->kinfo.tc_info.mqprio_active ? "enable" : "disable" ); |
11091 | dev_info(dev, "Default tx spare buffer size: %u\n" , |
11092 | hdev->tx_spare_buf_size); |
11093 | |
11094 | dev_info(dev, "PF info end.\n" ); |
11095 | } |
11096 | |
11097 | static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, |
11098 | struct hclge_vport *vport) |
11099 | { |
11100 | struct hnae3_client *client = vport->nic.client; |
11101 | struct hclge_dev *hdev = ae_dev->priv; |
11102 | int rst_cnt = hdev->rst_stats.reset_cnt; |
11103 | int ret; |
11104 | |
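/* rst_cnt was sampled above; if a reset is being handled or the
* counter moved while the instance was initializing, the registration
* raced with a reset and is unwound with -EBUSY.
*/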
11105 | ret = client->ops->init_instance(&vport->nic); |
11106 | if (ret) |
11107 | return ret; |
11108 | |
set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11110 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || |
11111 | rst_cnt != hdev->rst_stats.reset_cnt) { |
11112 | ret = -EBUSY; |
11113 | goto init_nic_err; |
11114 | } |
11115 | |
11116 | /* Enable nic hw error interrupts */ |
ret = hclge_config_nic_hw_error(hdev, true);
if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable hw error interrupts\n", ret);
goto init_nic_err;
}

hnae3_set_client_init_flag(client, ae_dev, 1);
11125 | |
11126 | if (netif_msg_drv(&hdev->vport->nic)) |
11127 | hclge_info_show(hdev); |
11128 | |
11129 | return ret; |
11130 | |
11131 | init_nic_err: |
clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11133 | while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
11134 | msleep(HCLGE_WAIT_RESET_DONE); |
11135 | |
11136 | client->ops->uninit_instance(&vport->nic, 0); |
11137 | |
11138 | return ret; |
11139 | } |
11140 | |
11141 | static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, |
11142 | struct hclge_vport *vport) |
11143 | { |
11144 | struct hclge_dev *hdev = ae_dev->priv; |
11145 | struct hnae3_client *client; |
11146 | int rst_cnt; |
11147 | int ret; |
11148 | |
11149 | if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || |
11150 | !hdev->nic_client) |
11151 | return 0; |
11152 | |
11153 | client = hdev->roce_client; |
11154 | ret = hclge_init_roce_base_info(vport); |
11155 | if (ret) |
11156 | return ret; |
11157 | |
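/* Same race check as for the NIC client: sample the reset counter and
* unwind the registration if a reset runs during instance init.
*/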
11158 | rst_cnt = hdev->rst_stats.reset_cnt; |
11159 | ret = client->ops->init_instance(&vport->roce); |
11160 | if (ret) |
11161 | return ret; |
11162 | |
set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11164 | if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || |
11165 | rst_cnt != hdev->rst_stats.reset_cnt) { |
11166 | ret = -EBUSY; |
11167 | goto init_roce_err; |
11168 | } |
11169 | |
11170 | /* Enable roce ras interrupts */ |
ret = hclge_config_rocee_ras_interrupt(hdev, true);
if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable roce ras interrupts\n", ret);
goto init_roce_err;
}

hnae3_set_client_init_flag(client, ae_dev, 1);
11179 | |
11180 | return 0; |
11181 | |
11182 | init_roce_err: |
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11184 | while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
11185 | msleep(HCLGE_WAIT_RESET_DONE); |
11186 | |
11187 | hdev->roce_client->ops->uninit_instance(&vport->roce, 0); |
11188 | |
11189 | return ret; |
11190 | } |
11191 | |
11192 | static int hclge_init_client_instance(struct hnae3_client *client, |
11193 | struct hnae3_ae_dev *ae_dev) |
11194 | { |
11195 | struct hclge_dev *hdev = ae_dev->priv; |
11196 | struct hclge_vport *vport = &hdev->vport[0]; |
11197 | int ret; |
11198 | |
11199 | switch (client->type) { |
11200 | case HNAE3_CLIENT_KNIC: |
11201 | hdev->nic_client = client; |
11202 | vport->nic.client = client; |
11203 | ret = hclge_init_nic_client_instance(ae_dev, vport); |
11204 | if (ret) |
11205 | goto clear_nic; |
11206 | |
11207 | ret = hclge_init_roce_client_instance(ae_dev, vport); |
11208 | if (ret) |
11209 | goto clear_roce; |
11210 | |
11211 | break; |
11212 | case HNAE3_CLIENT_ROCE: |
11213 | if (hnae3_dev_roce_supported(hdev)) { |
11214 | hdev->roce_client = client; |
11215 | vport->roce.client = client; |
11216 | } |
11217 | |
11218 | ret = hclge_init_roce_client_instance(ae_dev, vport); |
11219 | if (ret) |
11220 | goto clear_roce; |
11221 | |
11222 | break; |
11223 | default: |
11224 | return -EINVAL; |
11225 | } |
11226 | |
11227 | return 0; |
11228 | |
11229 | clear_nic: |
11230 | hdev->nic_client = NULL; |
11231 | vport->nic.client = NULL; |
11232 | return ret; |
11233 | clear_roce: |
11234 | hdev->roce_client = NULL; |
11235 | vport->roce.client = NULL; |
11236 | return ret; |
11237 | } |
11238 | |
11239 | static void hclge_uninit_client_instance(struct hnae3_client *client, |
11240 | struct hnae3_ae_dev *ae_dev) |
11241 | { |
11242 | struct hclge_dev *hdev = ae_dev->priv; |
11243 | struct hclge_vport *vport = &hdev->vport[0]; |
11244 | |
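/* The RoCE client is brought down before the NIC client it relies on;
* for a pure RoCE unregister, nothing more needs to be done.
*/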
11245 | if (hdev->roce_client) { |
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11247 | while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
11248 | msleep(HCLGE_WAIT_RESET_DONE); |
11249 | |
11250 | hdev->roce_client->ops->uninit_instance(&vport->roce, 0); |
11251 | hdev->roce_client = NULL; |
11252 | vport->roce.client = NULL; |
11253 | } |
11254 | if (client->type == HNAE3_CLIENT_ROCE) |
11255 | return; |
11256 | if (hdev->nic_client && client->ops->uninit_instance) { |
clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11258 | while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) |
11259 | msleep(HCLGE_WAIT_RESET_DONE); |
11260 | |
11261 | client->ops->uninit_instance(&vport->nic, 0); |
11262 | hdev->nic_client = NULL; |
11263 | vport->nic.client = NULL; |
11264 | } |
11265 | } |
11266 | |
11267 | static int hclge_dev_mem_map(struct hclge_dev *hdev) |
11268 | { |
11269 | struct pci_dev *pdev = hdev->pdev; |
11270 | struct hclge_hw *hw = &hdev->hw; |
11271 | |
/* if the device has no device memory, return directly */
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11274 | return 0; |
11275 | |
11276 | hw->hw.mem_base = |
devm_ioremap_wc(&pdev->dev,
pci_resource_start(pdev, HCLGE_MEM_BAR),
pci_resource_len(pdev, HCLGE_MEM_BAR));
if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
11282 | return -EFAULT; |
11283 | } |
11284 | |
11285 | return 0; |
11286 | } |
11287 | |
11288 | static int hclge_pci_init(struct hclge_dev *hdev) |
11289 | { |
11290 | struct pci_dev *pdev = hdev->pdev; |
11291 | struct hclge_hw *hw; |
11292 | int ret; |
11293 | |
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
11297 | return ret; |
11298 | } |
11299 | |
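/* Prefer 64-bit DMA and fall back to a 32-bit mask if the platform
* cannot provide it.
*/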
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
"can't set consistent PCI DMA");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11309 | } |
11310 | |
11311 | ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); |
11312 | if (ret) { |
11313 | dev_err(&pdev->dev, "PCI request regions failed %d\n" , ret); |
11314 | goto err_disable_device; |
11315 | } |
11316 | |
pci_set_master(pdev);
hw = &hdev->hw;
hw->hw.io_base = pcim_iomap(pdev, 2, 0);
if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
11322 | ret = -ENOMEM; |
11323 | goto err_release_regions; |
11324 | } |
11325 | |
11326 | ret = hclge_dev_mem_map(hdev); |
11327 | if (ret) |
11328 | goto err_unmap_io_base; |
11329 | |
hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11331 | |
11332 | return 0; |
11333 | |
11334 | err_unmap_io_base: |
pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
11340 | |
11341 | return ret; |
11342 | } |
11343 | |
11344 | static void hclge_pci_uninit(struct hclge_dev *hdev) |
11345 | { |
11346 | struct pci_dev *pdev = hdev->pdev; |
11347 | |
if (hdev->hw.hw.mem_base)
devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
11355 | } |
11356 | |
11357 | static void hclge_state_init(struct hclge_dev *hdev) |
11358 | { |
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11366 | } |
11367 | |
11368 | static void hclge_state_uninit(struct hclge_dev *hdev) |
11369 | { |
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);

if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
11377 | } |
11378 | |
11379 | static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, |
11380 | enum hnae3_reset_type rst_type) |
11381 | { |
11382 | #define HCLGE_RESET_RETRY_WAIT_MS 500 |
11383 | #define HCLGE_RESET_RETRY_CNT 5 |
11384 | |
11385 | struct hclge_dev *hdev = ae_dev->priv; |
11386 | int retry_cnt = 0; |
11387 | int ret; |
11388 | |
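/* Each attempt takes the reset semaphore and marks the reset as being
* handled; if preparing fails or another reset is still pending, the
* state is rolled back and the attempt retried after a short wait.
*/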
11389 | while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { |
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11392 | hdev->reset_type = rst_type; |
11393 | ret = hclge_reset_prepare(hdev); |
11394 | if (!ret && !hdev->reset_pending) |
11395 | break; |
11396 | |
11397 | dev_err(&hdev->pdev->dev, |
11398 | "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n" , |
11399 | ret, hdev->reset_pending, retry_cnt); |
11400 | clear_bit(nr: HCLGE_STATE_RST_HANDLING, addr: &hdev->state); |
11401 | up(sem: &hdev->reset_sem); |
11402 | msleep(HCLGE_RESET_RETRY_WAIT_MS); |
11403 | } |
11404 | |
11405 | /* disable misc vector before reset done */ |
hclge_enable_vector(&hdev->misc_vector, false);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11408 | |
11409 | if (hdev->reset_type == HNAE3_FLR_RESET) |
11410 | hdev->rst_stats.flr_rst_cnt++; |
11411 | } |
11412 | |
11413 | static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) |
11414 | { |
11415 | struct hclge_dev *hdev = ae_dev->priv; |
11416 | int ret; |
11417 | |
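/* Re-enable the misc vector that was disabled in the prepare stage,
* then rebuild the hardware state and release the reset semaphore.
*/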
hclge_enable_vector(&hdev->misc_vector, true);

ret = hclge_reset_rebuild(hdev);
if (ret)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
11427 | } |
11428 | |
11429 | static void hclge_clear_resetting_state(struct hclge_dev *hdev) |
11430 | { |
11431 | u16 i; |
11432 | |
11433 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
11434 | struct hclge_vport *vport = &hdev->vport[i]; |
11435 | int ret; |
11436 | |
11437 | /* Send cmd to clear vport's FUNC_RST_ING */ |
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
"clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
11443 | } |
11444 | } |
11445 | |
11446 | static int hclge_clear_hw_resource(struct hclge_dev *hdev) |
11447 | { |
11448 | struct hclge_desc desc; |
11449 | int ret; |
11450 | |
11451 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); |
11452 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* This new command is only supported by new firmware, it will
* fail with older firmware. Error value -EOPNOTSUPP can only be
* returned by older firmware running this command, so to keep the
* code backward compatible we override this value and return
* success.
*/
if (ret && ret != -EOPNOTSUPP) {
dev_err(&hdev->pdev->dev,
"failed to clear hw resource, ret = %d\n", ret);
11463 | return ret; |
11464 | } |
11465 | return 0; |
11466 | } |
11467 | |
11468 | static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) |
11469 | { |
11470 | if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) |
11471 | hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); |
11472 | } |
11473 | |
11474 | static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) |
11475 | { |
11476 | if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) |
11477 | hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); |
11478 | } |
11479 | |
11480 | static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle) |
11481 | { |
11482 | struct hclge_vport *vport = hclge_get_vport(handle); |
11483 | |
11484 | return &vport->back->hw.mac.wol; |
11485 | } |
11486 | |
11487 | static int hclge_get_wol_supported_mode(struct hclge_dev *hdev, |
11488 | u32 *wol_supported) |
11489 | { |
11490 | struct hclge_query_wol_supported_cmd *wol_supported_cmd; |
11491 | struct hclge_desc desc; |
11492 | int ret; |
11493 | |
11494 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE, |
11495 | true); |
11496 | wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data; |
11497 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to query wol supported, ret = %d\n", ret);
11502 | return ret; |
11503 | } |
11504 | |
11505 | *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode); |
11506 | |
11507 | return 0; |
11508 | } |
11509 | |
11510 | static int hclge_set_wol_cfg(struct hclge_dev *hdev, |
11511 | struct hclge_wol_info *wol_info) |
11512 | { |
11513 | struct hclge_wol_cfg_cmd *wol_cfg_cmd; |
11514 | struct hclge_desc desc; |
11515 | int ret; |
11516 | |
11517 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false); |
11518 | wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data; |
11519 | wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode); |
11520 | wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size; |
11521 | memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX); |
11522 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to set wol config, ret = %d\n", ret);
11527 | |
11528 | return ret; |
11529 | } |
11530 | |
11531 | static int hclge_update_wol(struct hclge_dev *hdev) |
11532 | { |
11533 | struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; |
11534 | |
11535 | if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) |
11536 | return 0; |
11537 | |
11538 | return hclge_set_wol_cfg(hdev, wol_info); |
11539 | } |
11540 | |
11541 | static int hclge_init_wol(struct hclge_dev *hdev) |
11542 | { |
11543 | struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; |
11544 | int ret; |
11545 | |
11546 | if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) |
11547 | return 0; |
11548 | |
11549 | memset(wol_info, 0, sizeof(struct hclge_wol_info)); |
11550 | ret = hclge_get_wol_supported_mode(hdev, |
&wol_info->wol_support_mode);
11552 | if (ret) { |
11553 | wol_info->wol_support_mode = 0; |
11554 | return ret; |
11555 | } |
11556 | |
11557 | return hclge_update_wol(hdev); |
11558 | } |
11559 | |
11560 | static void hclge_get_wol(struct hnae3_handle *handle, |
11561 | struct ethtool_wolinfo *wol) |
11562 | { |
11563 | struct hclge_wol_info *wol_info = hclge_get_wol_info(handle); |
11564 | |
11565 | wol->supported = wol_info->wol_support_mode; |
11566 | wol->wolopts = wol_info->wol_current_mode; |
11567 | if (wol_info->wol_current_mode & WAKE_MAGICSECURE) |
11568 | memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX); |
11569 | } |
11570 | |
11571 | static int hclge_set_wol(struct hnae3_handle *handle, |
11572 | struct ethtool_wolinfo *wol) |
11573 | { |
11574 | struct hclge_wol_info *wol_info = hclge_get_wol_info(handle); |
11575 | struct hclge_vport *vport = hclge_get_vport(handle); |
11576 | u32 wol_mode; |
11577 | int ret; |
11578 | |
11579 | wol_mode = wol->wolopts; |
11580 | if (wol_mode & ~wol_info->wol_support_mode) |
11581 | return -EINVAL; |
11582 | |
11583 | wol_info->wol_current_mode = wol_mode; |
11584 | if (wol_mode & WAKE_MAGICSECURE) { |
11585 | memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX); |
11586 | wol_info->wol_sopass_size = SOPASS_MAX; |
11587 | } else { |
11588 | wol_info->wol_sopass_size = 0; |
11589 | } |
11590 | |
ret = hclge_set_wol_cfg(vport->back, wol_info);
11592 | if (ret) |
11593 | wol_info->wol_current_mode = 0; |
11594 | |
11595 | return ret; |
11596 | } |
11597 | |
11598 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
11599 | { |
11600 | struct pci_dev *pdev = ae_dev->pdev; |
11601 | struct hclge_dev *hdev; |
11602 | int ret; |
11603 | |
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11605 | if (!hdev) |
11606 | return -ENOMEM; |
11607 | |
11608 | hdev->pdev = pdev; |
11609 | hdev->ae_dev = ae_dev; |
11610 | hdev->reset_type = HNAE3_NONE_RESET; |
11611 | hdev->reset_level = HNAE3_FUNC_RESET; |
11612 | ae_dev->priv = hdev; |
11613 | |
/* HW supports 2-layer vlan */
11615 | hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; |
11616 | |
11617 | mutex_init(&hdev->vport_lock); |
11618 | spin_lock_init(&hdev->fd_rule_lock); |
sema_init(&hdev->reset_sem, 1);
11620 | |
11621 | ret = hclge_pci_init(hdev); |
11622 | if (ret) |
11623 | goto out; |
11624 | |
11625 | ret = hclge_devlink_init(hdev); |
11626 | if (ret) |
11627 | goto err_pci_uninit; |
11628 | |
devl_lock(hdev->devlink);

/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11633 | if (ret) |
11634 | goto err_devlink_uninit; |
11635 | |
11636 | /* Firmware command initialize */ |
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
11639 | if (ret) |
11640 | goto err_cmd_uninit; |
11641 | |
11642 | ret = hclge_clear_hw_resource(hdev); |
11643 | if (ret) |
11644 | goto err_cmd_uninit; |
11645 | |
11646 | ret = hclge_get_cap(hdev); |
11647 | if (ret) |
11648 | goto err_cmd_uninit; |
11649 | |
11650 | ret = hclge_query_dev_specs(hdev); |
11651 | if (ret) { |
11652 | dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n" , |
11653 | ret); |
11654 | goto err_cmd_uninit; |
11655 | } |
11656 | |
11657 | ret = hclge_configure(hdev); |
11658 | if (ret) { |
11659 | dev_err(&pdev->dev, "Configure dev error, ret = %d.\n" , ret); |
11660 | goto err_cmd_uninit; |
11661 | } |
11662 | |
11663 | ret = hclge_init_msi(hdev); |
11664 | if (ret) { |
11665 | dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n" , ret); |
11666 | goto err_cmd_uninit; |
11667 | } |
11668 | |
11669 | ret = hclge_misc_irq_init(hdev); |
11670 | if (ret) |
11671 | goto err_msi_uninit; |
11672 | |
11673 | ret = hclge_alloc_tqps(hdev); |
11674 | if (ret) { |
11675 | dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n" , ret); |
11676 | goto err_msi_irq_uninit; |
11677 | } |
11678 | |
11679 | ret = hclge_alloc_vport(hdev); |
11680 | if (ret) |
11681 | goto err_msi_irq_uninit; |
11682 | |
11683 | ret = hclge_map_tqp(hdev); |
11684 | if (ret) |
11685 | goto err_msi_irq_uninit; |
11686 | |
11687 | if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { |
clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
11689 | if (hnae3_dev_phy_imp_supported(hdev)) |
11690 | ret = hclge_update_tp_port_info(hdev); |
11691 | else |
11692 | ret = hclge_mac_mdio_config(hdev); |
11693 | |
11694 | if (ret) |
11695 | goto err_msi_irq_uninit; |
11696 | } |
11697 | |
11698 | ret = hclge_init_umv_space(hdev); |
11699 | if (ret) |
11700 | goto err_mdiobus_unreg; |
11701 | |
11702 | ret = hclge_mac_init(hdev); |
11703 | if (ret) { |
11704 | dev_err(&pdev->dev, "Mac init error, ret = %d\n" , ret); |
11705 | goto err_mdiobus_unreg; |
11706 | } |
11707 | |
11708 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
11709 | if (ret) { |
11710 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n" , ret); |
11711 | goto err_mdiobus_unreg; |
11712 | } |
11713 | |
11714 | ret = hclge_config_gro(hdev); |
11715 | if (ret) |
11716 | goto err_mdiobus_unreg; |
11717 | |
11718 | ret = hclge_init_vlan_config(hdev); |
11719 | if (ret) { |
11720 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n" , ret); |
11721 | goto err_mdiobus_unreg; |
11722 | } |
11723 | |
11724 | ret = hclge_tm_schd_init(hdev); |
11725 | if (ret) { |
11726 | dev_err(&pdev->dev, "tm schd init fail, ret =%d\n" , ret); |
11727 | goto err_mdiobus_unreg; |
11728 | } |
11729 | |
ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
&hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11734 | goto err_mdiobus_unreg; |
11735 | } |
11736 | |
11737 | ret = hclge_rss_init_hw(hdev); |
11738 | if (ret) { |
11739 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n" , ret); |
11740 | goto err_mdiobus_unreg; |
11741 | } |
11742 | |
11743 | ret = init_mgr_tbl(hdev); |
11744 | if (ret) { |
11745 | dev_err(&pdev->dev, "manager table init fail, ret =%d\n" , ret); |
11746 | goto err_mdiobus_unreg; |
11747 | } |
11748 | |
11749 | ret = hclge_init_fd_config(hdev); |
11750 | if (ret) { |
11751 | dev_err(&pdev->dev, |
11752 | "fd table init fail, ret=%d\n" , ret); |
11753 | goto err_mdiobus_unreg; |
11754 | } |
11755 | |
11756 | ret = hclge_ptp_init(hdev); |
11757 | if (ret) |
11758 | goto err_mdiobus_unreg; |
11759 | |
11760 | ret = hclge_update_port_info(hdev); |
11761 | if (ret) |
11762 | goto err_mdiobus_unreg; |
11763 | |
11764 | INIT_KFIFO(hdev->mac_tnl_log); |
11765 | |
11766 | hclge_dcb_ops_set(hdev); |
11767 | |
11768 | timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); |
11769 | INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); |
11770 | |
11771 | hclge_clear_all_event_cause(hdev); |
11772 | hclge_clear_resetting_state(hdev); |
11773 | |
11774 | /* Log and clear the hw errors those already occurred */ |
11775 | if (hnae3_dev_ras_imp_supported(hdev)) |
11776 | hclge_handle_occurred_error(hdev); |
11777 | else |
11778 | hclge_handle_all_hns_hw_errors(ae_dev); |
11779 | |
/* request a delayed reset for the error recovery because an immediate
* global reset on a PF would affect the pending initialization of
* other PFs
*/
11783 | if (ae_dev->hw_err_reset_req) { |
11784 | enum hnae3_reset_type reset_level; |
11785 | |
11786 | reset_level = hclge_get_reset_level(ae_dev, |
&ae_dev->hw_err_reset_req);
hclge_set_def_reset_request(ae_dev, reset_level);
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11790 | } |
11791 | |
11792 | hclge_init_rxd_adv_layout(hdev); |
11793 | |
11794 | /* Enable MISC vector(vector0) */ |
hclge_enable_vector(&hdev->misc_vector, true);
11796 | |
11797 | ret = hclge_init_wol(hdev); |
11798 | if (ret) |
11799 | dev_warn(&pdev->dev, |
11800 | "failed to wake on lan init, ret = %d\n" , ret); |
11801 | |
11802 | hclge_state_init(hdev); |
11803 | hdev->last_reset_time = jiffies; |
11804 | |
11805 | dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n" , |
11806 | HCLGE_DRIVER_NAME); |
11807 | |
hclge_task_schedule(hdev, round_jiffies_relative(HZ));

devl_unlock(hdev->devlink);
11811 | return 0; |
11812 | |
11813 | err_mdiobus_unreg: |
11814 | if (hdev->hw.mac.phydev) |
mdiobus_unregister(hdev->hw.mac.mdio_bus);
11816 | err_msi_irq_uninit: |
11817 | hclge_misc_irq_uninit(hdev); |
11818 | err_msi_uninit: |
pci_free_irq_vectors(pdev);
11820 | err_cmd_uninit: |
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11822 | err_devlink_uninit: |
devl_unlock(hdev->devlink);
11824 | hclge_devlink_uninit(hdev); |
11825 | err_pci_uninit: |
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
11829 | out: |
mutex_destroy(&hdev->vport_lock);
11831 | return ret; |
11832 | } |
11833 | |
11834 | static void hclge_stats_clear(struct hclge_dev *hdev) |
11835 | { |
11836 | memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); |
11837 | memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); |
11838 | } |
11839 | |
11840 | static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) |
11841 | { |
return hclge_config_switch_param(hdev, vf, enable,
11843 | HCLGE_SWITCH_ANTI_SPOOF_MASK); |
11844 | } |
11845 | |
11846 | static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) |
11847 | { |
11848 | return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, |
11849 | HCLGE_FILTER_FE_NIC_INGRESS_B, |
enable, vf);
11851 | } |
11852 | |
11853 | static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) |
11854 | { |
11855 | int ret; |
11856 | |
11857 | ret = hclge_set_mac_spoofchk(hdev, vf, enable); |
11858 | if (ret) { |
11859 | dev_err(&hdev->pdev->dev, |
11860 | "Set vf %d mac spoof check %s failed, ret=%d\n" , |
11861 | vf, enable ? "on" : "off" , ret); |
11862 | return ret; |
11863 | } |
11864 | |
11865 | ret = hclge_set_vlan_spoofchk(hdev, vf, enable); |
11866 | if (ret) |
11867 | dev_err(&hdev->pdev->dev, |
11868 | "Set vf %d vlan spoof check %s failed, ret=%d\n" , |
11869 | vf, enable ? "on" : "off" , ret); |
11870 | |
11871 | return ret; |
11872 | } |
11873 | |
11874 | static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, |
11875 | bool enable) |
11876 | { |
11877 | struct hclge_vport *vport = hclge_get_vport(handle); |
11878 | struct hclge_dev *hdev = vport->back; |
11879 | u32 new_spoofchk = enable ? 1 : 0; |
11880 | int ret; |
11881 | |
11882 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
11883 | return -EOPNOTSUPP; |
11884 | |
11885 | vport = hclge_get_vf_vport(hdev, vf); |
11886 | if (!vport) |
11887 | return -EINVAL; |
11888 | |
11889 | if (vport->vf_info.spoofchk == new_spoofchk) |
11890 | return 0; |
11891 | |
11892 | if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) |
11893 | dev_warn(&hdev->pdev->dev, |
11894 | "vf %d vlan table is full, enable spoof check may cause its packet send fail\n" , |
11895 | vf); |
else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11899 | vf); |
11900 | |
ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11902 | if (ret) |
11903 | return ret; |
11904 | |
11905 | vport->vf_info.spoofchk = new_spoofchk; |
11906 | return 0; |
11907 | } |
11908 | |
11909 | static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) |
11910 | { |
11911 | struct hclge_vport *vport = hdev->vport; |
11912 | int ret; |
11913 | int i; |
11914 | |
11915 | if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) |
11916 | return 0; |
11917 | |
11918 | /* resume the vf spoof check state after reset */ |
11919 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
vport->vf_info.spoofchk);
11922 | if (ret) |
11923 | return ret; |
11924 | |
11925 | vport++; |
11926 | } |
11927 | |
11928 | return 0; |
11929 | } |
11930 | |
11931 | static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) |
11932 | { |
11933 | struct hclge_vport *vport = hclge_get_vport(handle); |
11934 | struct hclge_dev *hdev = vport->back; |
11935 | u32 new_trusted = enable ? 1 : 0; |
11936 | |
11937 | vport = hclge_get_vf_vport(hdev, vf); |
11938 | if (!vport) |
11939 | return -EINVAL; |
11940 | |
11941 | if (vport->vf_info.trusted == new_trusted) |
11942 | return 0; |
11943 | |
11944 | vport->vf_info.trusted = new_trusted; |
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
hclge_task_schedule(hdev, 0);
11947 | |
11948 | return 0; |
11949 | } |
11950 | |
11951 | static void hclge_reset_vf_rate(struct hclge_dev *hdev) |
11952 | { |
11953 | int ret; |
11954 | int vf; |
11955 | |
11956 | /* reset vf rate to default value */ |
11957 | for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { |
11958 | struct hclge_vport *vport = &hdev->vport[vf]; |
11959 | |
11960 | vport->vf_info.max_tx_rate = 0; |
ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
if (ret)
dev_err(&hdev->pdev->dev,
"vf%d failed to reset to default, ret=%d\n",
11965 | vf - HCLGE_VF_VPORT_START_NUM, ret); |
11966 | } |
11967 | } |
11968 | |
11969 | static int hclge_vf_rate_param_check(struct hclge_dev *hdev, |
11970 | int min_tx_rate, int max_tx_rate) |
11971 | { |
11972 | if (min_tx_rate != 0 || |
11973 | max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { |
11974 | dev_err(&hdev->pdev->dev, |
11975 | "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n" , |
11976 | min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); |
11977 | return -EINVAL; |
11978 | } |
11979 | |
11980 | return 0; |
11981 | } |
11982 | |
11983 | static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, |
11984 | int min_tx_rate, int max_tx_rate, bool force) |
11985 | { |
11986 | struct hclge_vport *vport = hclge_get_vport(handle); |
11987 | struct hclge_dev *hdev = vport->back; |
11988 | int ret; |
11989 | |
11990 | ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); |
11991 | if (ret) |
11992 | return ret; |
11993 | |
11994 | vport = hclge_get_vf_vport(hdev, vf); |
11995 | if (!vport) |
11996 | return -EINVAL; |
11997 | |
11998 | if (!force && max_tx_rate == vport->vf_info.max_tx_rate) |
11999 | return 0; |
12000 | |
12001 | ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); |
12002 | if (ret) |
12003 | return ret; |
12004 | |
12005 | vport->vf_info.max_tx_rate = max_tx_rate; |
12006 | |
12007 | return 0; |
12008 | } |
12009 | |
12010 | static int hclge_resume_vf_rate(struct hclge_dev *hdev) |
12011 | { |
12012 | struct hnae3_handle *handle = &hdev->vport->nic; |
12013 | struct hclge_vport *vport; |
12014 | int ret; |
12015 | int vf; |
12016 | |
12017 | /* resume the vf max_tx_rate after reset */ |
for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12019 | vport = hclge_get_vf_vport(hdev, vf); |
12020 | if (!vport) |
12021 | return -EINVAL; |
12022 | |
12023 | /* zero means max rate, after reset, firmware already set it to |
12024 | * max rate, so just continue. |
12025 | */ |
12026 | if (!vport->vf_info.max_tx_rate) |
12027 | continue; |
12028 | |
ret = hclge_set_vf_rate(handle, vf, 0,
vport->vf_info.max_tx_rate, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"vf%d failed to resume tx_rate:%u, ret=%d\n",
12034 | vf, vport->vf_info.max_tx_rate, ret); |
12035 | return ret; |
12036 | } |
12037 | } |
12038 | |
12039 | return 0; |
12040 | } |
12041 | |
12042 | static void hclge_reset_vport_state(struct hclge_dev *hdev) |
12043 | { |
12044 | struct hclge_vport *vport = hdev->vport; |
12045 | int i; |
12046 | |
12047 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12049 | vport++; |
12050 | } |
12051 | } |
12052 | |
12053 | static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) |
12054 | { |
12055 | struct hclge_dev *hdev = ae_dev->priv; |
12056 | struct pci_dev *pdev = ae_dev->pdev; |
12057 | int ret; |
12058 | |
set_bit(HCLGE_STATE_DOWN, &hdev->state);
12060 | |
12061 | hclge_stats_clear(hdev); |
/* NOTE: a pf reset does not need to clear or restore the pf and vf
* table entries, so the tables in memory should not be cleaned here.
*/
12065 | if (hdev->reset_type == HNAE3_IMP_RESET || |
12066 | hdev->reset_type == HNAE3_GLOBAL_RESET) { |
12067 | memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); |
12068 | memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); |
bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12070 | hclge_reset_umv_space(hdev); |
12071 | } |
12072 | |
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
12077 | return ret; |
12078 | } |
12079 | |
12080 | ret = hclge_map_tqp(hdev); |
12081 | if (ret) { |
12082 | dev_err(&pdev->dev, "Map tqp error, ret = %d.\n" , ret); |
12083 | return ret; |
12084 | } |
12085 | |
12086 | ret = hclge_mac_init(hdev); |
12087 | if (ret) { |
12088 | dev_err(&pdev->dev, "Mac init error, ret = %d\n" , ret); |
12089 | return ret; |
12090 | } |
12091 | |
12092 | ret = hclge_tp_port_init(hdev); |
12093 | if (ret) { |
12094 | dev_err(&pdev->dev, "failed to init tp port, ret = %d\n" , |
12095 | ret); |
12096 | return ret; |
12097 | } |
12098 | |
12099 | ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); |
12100 | if (ret) { |
12101 | dev_err(&pdev->dev, "Enable tso fail, ret =%d\n" , ret); |
12102 | return ret; |
12103 | } |
12104 | |
12105 | ret = hclge_config_gro(hdev); |
12106 | if (ret) |
12107 | return ret; |
12108 | |
12109 | ret = hclge_init_vlan_config(hdev); |
12110 | if (ret) { |
12111 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n" , ret); |
12112 | return ret; |
12113 | } |
12114 | |
12115 | hclge_reset_tc_config(hdev); |
12116 | |
ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12120 | return ret; |
12121 | } |
12122 | |
12123 | ret = hclge_rss_init_hw(hdev); |
12124 | if (ret) { |
12125 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n" , ret); |
12126 | return ret; |
12127 | } |
12128 | |
12129 | ret = init_mgr_tbl(hdev); |
12130 | if (ret) { |
12131 | dev_err(&pdev->dev, |
12132 | "failed to reinit manager table, ret = %d\n" , ret); |
12133 | return ret; |
12134 | } |
12135 | |
12136 | ret = hclge_init_fd_config(hdev); |
12137 | if (ret) { |
12138 | dev_err(&pdev->dev, "fd table init fail, ret=%d\n" , ret); |
12139 | return ret; |
12140 | } |
12141 | |
12142 | ret = hclge_ptp_init(hdev); |
12143 | if (ret) |
12144 | return ret; |
12145 | |
12146 | /* Log and clear the hw errors those already occurred */ |
12147 | if (hnae3_dev_ras_imp_supported(hdev)) |
12148 | hclge_handle_occurred_error(hdev); |
12149 | else |
12150 | hclge_handle_all_hns_hw_errors(ae_dev); |
12151 | |
12152 | /* Re-enable the hw error interrupts because |
12153 | * the interrupts get disabled on global reset. |
12154 | */ |
ret = hclge_config_nic_hw_error(hdev, true);
if (ret) {
dev_err(&pdev->dev,
"fail(%d) to re-enable NIC hw error interrupts\n",
12159 | ret); |
12160 | return ret; |
12161 | } |
12162 | |
12163 | if (hdev->roce_client) { |
ret = hclge_config_rocee_ras_interrupt(hdev, true);
if (ret) {
dev_err(&pdev->dev,
"fail(%d) to re-enable roce ras interrupts\n",
12168 | ret); |
12169 | return ret; |
12170 | } |
12171 | } |
12172 | |
12173 | hclge_reset_vport_state(hdev); |
12174 | ret = hclge_reset_vport_spoofchk(hdev); |
12175 | if (ret) |
12176 | return ret; |
12177 | |
12178 | ret = hclge_resume_vf_rate(hdev); |
12179 | if (ret) |
12180 | return ret; |
12181 | |
12182 | hclge_init_rxd_adv_layout(hdev); |
12183 | |
12184 | ret = hclge_update_wol(hdev); |
12185 | if (ret) |
12186 | dev_warn(&pdev->dev, |
12187 | "failed to update wol config, ret = %d\n" , ret); |
12188 | |
12189 | dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n" , |
12190 | HCLGE_DRIVER_NAME); |
12191 | |
12192 | return 0; |
12193 | } |
12194 | |
12195 | static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) |
12196 | { |
12197 | struct hclge_dev *hdev = ae_dev->priv; |
12198 | struct hclge_mac *mac = &hdev->hw.mac; |
12199 | |
12200 | hclge_reset_vf_rate(hdev); |
12201 | hclge_clear_vf_vlan(hdev); |
12202 | hclge_state_uninit(hdev); |
12203 | hclge_ptp_uninit(hdev); |
12204 | hclge_uninit_rxd_adv_layout(hdev); |
12205 | hclge_uninit_mac_table(hdev); |
12206 | hclge_del_all_fd_entries(hdev); |
12207 | |
12208 | if (mac->phydev) |
mdiobus_unregister(mac->mdio_bus);
12210 | |
12211 | /* Disable MISC vector(vector0) */ |
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);

/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
12219 | |
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
hclge_uninit_vport_vlan_table(hdev);
mutex_destroy(&hdev->vport_lock);
12226 | ae_dev->priv = NULL; |
12227 | } |
12228 | |
12229 | static u32 hclge_get_max_channels(struct hnae3_handle *handle) |
12230 | { |
12231 | struct hclge_vport *vport = hclge_get_vport(handle); |
12232 | struct hclge_dev *hdev = vport->back; |
12233 | |
12234 | return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); |
12235 | } |
12236 | |
12237 | static void hclge_get_channels(struct hnae3_handle *handle, |
12238 | struct ethtool_channels *ch) |
12239 | { |
12240 | ch->max_combined = hclge_get_max_channels(handle); |
12241 | ch->other_count = 1; |
12242 | ch->max_other = 1; |
12243 | ch->combined_count = handle->kinfo.rss_size; |
12244 | } |
12245 | |
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
u16 *alloc_tqps, u16 *max_rss_size)
12248 | { |
12249 | struct hclge_vport *vport = hclge_get_vport(handle); |
12250 | struct hclge_dev *hdev = vport->back; |
12251 | |
12252 | *alloc_tqps = vport->alloc_tqps; |
12253 | *max_rss_size = hdev->pf_rss_size_max; |
12254 | } |
12255 | |
static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
12257 | { |
12258 | struct hclge_vport *vport = hclge_get_vport(handle); |
12259 | u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; |
12260 | struct hclge_dev *hdev = vport->back; |
12261 | u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; |
12262 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
12263 | u16 roundup_size; |
12264 | unsigned int i; |
12265 | |
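/* tc_size is programmed as an exponent: round the rss_size up to a
* power of two and take its log2.
*/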
12266 | roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); |
12267 | roundup_size = ilog2(roundup_size); |
12268 | /* Set the RSS TC mode according to the new RSS size */ |
12269 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
12270 | tc_valid[i] = 0; |
12271 | |
12272 | if (!(hdev->hw_tc_map & BIT(i))) |
12273 | continue; |
12274 | |
12275 | tc_valid[i] = 1; |
12276 | tc_size[i] = roundup_size; |
12277 | tc_offset[i] = vport->nic.kinfo.rss_size * i; |
12278 | } |
12279 | |
return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
12281 | tc_size); |
12282 | } |
12283 | |
12284 | static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, |
12285 | bool rxfh_configured) |
12286 | { |
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12288 | struct hclge_vport *vport = hclge_get_vport(handle); |
12289 | struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; |
12290 | struct hclge_dev *hdev = vport->back; |
u16 cur_rss_size = kinfo->rss_size;
12292 | u16 cur_tqps = kinfo->num_tqps; |
u32 *rss_indir;
12294 | unsigned int i; |
12295 | int ret; |
12296 | |
12297 | kinfo->req_rss_size = new_tqps_num; |
12298 | |
12299 | ret = hclge_tm_vport_map_update(hdev); |
12300 | if (ret) { |
12301 | dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n" , ret); |
12302 | return ret; |
12303 | } |
12304 | |
12305 | ret = hclge_set_rss_tc_mode_cfg(handle); |
12306 | if (ret) |
12307 | return ret; |
12308 | |
12309 | /* RSS indirection table has been configured by user */ |
12310 | if (rxfh_configured) |
12311 | goto out; |
12312 | |
12313 | /* Reinitializes the rss indirect table according to the new RSS size */ |
rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12315 | GFP_KERNEL); |
12316 | if (!rss_indir) |
12317 | return -ENOMEM; |
12318 | |
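/* Spread the table entries round-robin: entry i maps to queue
* i % rss_size.
*/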
12319 | for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) |
12320 | rss_indir[i] = i % kinfo->rss_size; |
12321 | |
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
ret);

kfree(rss_indir);
12328 | |
12329 | out: |
12330 | if (!ret) |
12331 | dev_info(&hdev->pdev->dev, |
12332 | "Channels changed, rss_size from %u to %u, tqps from %u to %u" , |
12333 | cur_rss_size, kinfo->rss_size, |
12334 | cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); |
12335 | |
12336 | return ret; |
12337 | } |
12338 | |
12339 | static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) |
12340 | { |
12341 | struct hclge_set_led_state_cmd *req; |
12342 | struct hclge_desc desc; |
12343 | int ret; |
12344 | |
12345 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); |
12346 | |
12347 | req = (struct hclge_set_led_state_cmd *)desc.data; |
12348 | hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, |
12349 | HCLGE_LED_LOCATE_STATE_S, locate_led_status); |
12350 | |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Send set led state cmd error, ret =%d\n", ret);
12355 | |
12356 | return ret; |
12357 | } |
12358 | |
12359 | enum hclge_led_status { |
12360 | HCLGE_LED_OFF, |
12361 | HCLGE_LED_ON, |
12362 | HCLGE_LED_NO_CHANGE = 0xFF, |
12363 | }; |
12364 | |
12365 | static int hclge_set_led_id(struct hnae3_handle *handle, |
12366 | enum ethtool_phys_id_state status) |
12367 | { |
12368 | struct hclge_vport *vport = hclge_get_vport(handle); |
12369 | struct hclge_dev *hdev = vport->back; |
12370 | |
12371 | switch (status) { |
12372 | case ETHTOOL_ID_ACTIVE: |
return hclge_set_led_status(hdev, HCLGE_LED_ON);
case ETHTOOL_ID_INACTIVE:
return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12376 | default: |
12377 | return -EINVAL; |
12378 | } |
12379 | } |
12380 | |
12381 | static void hclge_get_link_mode(struct hnae3_handle *handle, |
12382 | unsigned long *supported, |
12383 | unsigned long *advertising) |
12384 | { |
12385 | unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); |
12386 | struct hclge_vport *vport = hclge_get_vport(handle); |
12387 | struct hclge_dev *hdev = vport->back; |
12388 | unsigned int idx = 0; |
12389 | |
12390 | for (; idx < size; idx++) { |
12391 | supported[idx] = hdev->hw.mac.supported[idx]; |
12392 | advertising[idx] = hdev->hw.mac.advertising[idx]; |
12393 | } |
12394 | } |
12395 | |
12396 | static int hclge_gro_en(struct hnae3_handle *handle, bool enable) |
12397 | { |
12398 | struct hclge_vport *vport = hclge_get_vport(handle); |
12399 | struct hclge_dev *hdev = vport->back; |
12400 | bool gro_en_old = hdev->gro_en; |
12401 | int ret; |
12402 | |
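/* Apply the new GRO state and roll back to the old value if the
* firmware rejects the change.
*/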
12403 | hdev->gro_en = enable; |
12404 | ret = hclge_config_gro(hdev); |
12405 | if (ret) |
12406 | hdev->gro_en = gro_en_old; |
12407 | |
12408 | return ret; |
12409 | } |
12410 | |
12411 | static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) |
12412 | { |
12413 | struct hnae3_handle *handle = &vport->nic; |
12414 | struct hclge_dev *hdev = vport->back; |
12415 | bool uc_en = false; |
12416 | bool mc_en = false; |
12417 | u8 tmp_flags; |
12418 | bool bc_en; |
12419 | int ret; |
12420 | |
12421 | if (vport->last_promisc_flags != vport->overflow_promisc_flags) { |
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12423 | vport->last_promisc_flags = vport->overflow_promisc_flags; |
12424 | } |
12425 | |
if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
&vport->state))
12428 | return 0; |
12429 | |
12430 | /* for PF */ |
12431 | if (!vport->vport_id) { |
12432 | tmp_flags = handle->netdev_flags | vport->last_promisc_flags; |
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
if (!ret)
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
else
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
&vport->state);
12441 | return ret; |
12442 | } |
12443 | |
/* for VF: only a trusted VF may enter unicast/multicast promiscuous
* mode, while broadcast follows the VF's own request
*/
12445 | if (vport->vf_info.trusted) { |
12446 | uc_en = vport->vf_info.request_uc_en > 0 || |
12447 | vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; |
12448 | mc_en = vport->vf_info.request_mc_en > 0 || |
12449 | vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; |
12450 | } |
12451 | bc_en = vport->vf_info.request_bc_en > 0; |
12452 | |
ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
mc_en, bc_en);
if (ret) {
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12457 | return ret; |
12458 | } |
12459 | hclge_set_vport_vlan_fltr_change(vport); |
12460 | |
12461 | return 0; |
12462 | } |
12463 | |
12464 | static void hclge_sync_promisc_mode(struct hclge_dev *hdev) |
12465 | { |
12466 | struct hclge_vport *vport; |
12467 | int ret; |
12468 | u16 i; |
12469 | |
12470 | for (i = 0; i < hdev->num_alloc_vport; i++) { |
12471 | vport = &hdev->vport[i]; |
12472 | |
12473 | ret = hclge_sync_vport_promisc_mode(vport); |
12474 | if (ret) |
12475 | return; |
12476 | } |
12477 | } |
12478 | |
12479 | static bool hclge_module_existed(struct hclge_dev *hdev) |
12480 | { |
12481 | struct hclge_desc desc; |
12482 | u32 existed; |
12483 | int ret; |
12484 | |
12485 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get SFP exist state, ret = %d\n", ret);
12490 | return false; |
12491 | } |
12492 | |
12493 | existed = le32_to_cpu(desc.data[0]); |
12494 | |
12495 | return existed != 0; |
12496 | } |
12497 | |
/* One read needs 6 bds (140 bytes in total).
* Returns the number of bytes actually read; 0 means the read failed.
*/
12501 | static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, |
12502 | u32 len, u8 *data) |
12503 | { |
12504 | struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; |
12505 | struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; |
12506 | u16 read_len; |
12507 | u16 copy_len; |
12508 | int ret; |
12509 | int i; |
12510 | |
12511 | /* setup all 6 bds to read module eeprom info. */ |
12512 | for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { |
12513 | hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, |
12514 | true); |
12515 | |
12516 | /* bd0~bd4 need next flag */ |
12517 | if (i < HCLGE_SFP_INFO_CMD_NUM - 1) |
12518 | desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); |
12519 | } |
12520 | |
12521 | /* setup bd0, this bd contains offset and read length. */ |
12522 | sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; |
12523 | sfp_info_bd0->offset = cpu_to_le16((u16)offset); |
12524 | read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); |
12525 | sfp_info_bd0->read_len = cpu_to_le16(read_len); |
12526 | |
ret = hclge_cmd_send(&hdev->hw, desc, i);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get SFP eeprom info, ret = %d\n", ret);
12531 | return 0; |
12532 | } |
12533 | |
12534 | /* copy sfp info from bd0 to out buffer. */ |
12535 | copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); |
12536 | memcpy(data, sfp_info_bd0->data, copy_len); |
12537 | read_len = copy_len; |
12538 | |
12539 | /* copy sfp info from bd1~bd5 to out buffer if needed. */ |
12540 | for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { |
12541 | if (read_len >= len) |
12542 | return read_len; |
12543 | |
12544 | copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); |
12545 | memcpy(data + read_len, desc[i].data, copy_len); |
12546 | read_len += copy_len; |
12547 | } |
12548 | |
12549 | return read_len; |
12550 | } |
12551 | |
12552 | static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, |
12553 | u32 len, u8 *data) |
12554 | { |
12555 | struct hclge_vport *vport = hclge_get_vport(handle); |
12556 | struct hclge_dev *hdev = vport->back; |
12557 | u32 read_len = 0; |
12558 | u16 data_len; |
12559 | |
12560 | if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) |
12561 | return -EOPNOTSUPP; |
12562 | |
12563 | if (!hclge_module_existed(hdev)) |
12564 | return -ENXIO; |
12565 | |
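/* Firmware returns at most HCLGE_SFP_INFO_MAX_LEN bytes per command,
* so keep reading until the requested length is fetched.
*/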
12566 | while (read_len < len) { |
12567 | data_len = hclge_get_sfp_eeprom_info(hdev, |
offset + read_len,
len - read_len,
data + read_len);
12571 | if (!data_len) |
12572 | return -EIO; |
12573 | |
12574 | read_len += data_len; |
12575 | } |
12576 | |
12577 | return 0; |
12578 | } |
12579 | |
12580 | static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, |
12581 | u32 *status_code) |
12582 | { |
12583 | struct hclge_vport *vport = hclge_get_vport(handle); |
12584 | struct hclge_dev *hdev = vport->back; |
12585 | struct hclge_desc desc; |
12586 | int ret; |
12587 | |
12588 | if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) |
12589 | return -EOPNOTSUPP; |
12590 | |
12591 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); |
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to query link diagnosis info, ret = %d\n", ret);
12596 | return ret; |
12597 | } |
12598 | |
12599 | *status_code = le32_to_cpu(desc.data[0]); |
12600 | return 0; |
12601 | } |
12602 | |
/* After sriov is disabled, a VF still holds some configuration and
* information set up by the PF that needs to be cleaned.
*/
12606 | static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) |
12607 | { |
12608 | struct hclge_dev *hdev = vport->back; |
12609 | struct hclge_vlan_info vlan_info; |
12610 | int ret; |
12611 | |
clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12614 | vport->need_notify = 0; |
12615 | vport->mps = 0; |
12616 | |
/* after disabling sriov, clean the VF rate configured by the PF */
ret = hclge_tm_qs_shaper_cfg(vport, 0);
12619 | if (ret) |
12620 | dev_err(&hdev->pdev->dev, |
12621 | "failed to clean vf%d rate config, ret = %d\n" , |
12622 | vfid, ret); |
12623 | |
12624 | vlan_info.vlan_tag = 0; |
12625 | vlan_info.qos = 0; |
12626 | vlan_info.vlan_proto = ETH_P_8021Q; |
12627 | ret = hclge_update_port_base_vlan_cfg(vport, |
HNAE3_PORT_BASE_VLAN_DISABLE,
&vlan_info);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to clean vf%d port base vlan, ret = %d\n",
12633 | vfid, ret); |
12634 | |
ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to clean vf%d spoof config, ret = %d\n",
12639 | vfid, ret); |
12640 | |
12641 | memset(&vport->vf_info, 0, sizeof(vport->vf_info)); |
12642 | } |
12643 | |
12644 | static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) |
12645 | { |
12646 | struct hclge_dev *hdev = ae_dev->priv; |
12647 | struct hclge_vport *vport; |
12648 | int i; |
12649 | |
12650 | for (i = 0; i < num_vfs; i++) { |
12651 | vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; |
12652 | |
hclge_clear_vport_vf_info(vport, i);
12654 | } |
12655 | } |
12656 | |
12657 | static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, |
12658 | u8 *priority) |
12659 | { |
struct hclge_vport *vport = hclge_get_vport(h);
12661 | |
12662 | if (dscp >= HNAE3_MAX_DSCP) |
12663 | return -EINVAL; |
12664 | |
12665 | if (tc_mode) |
12666 | *tc_mode = vport->nic.kinfo.tc_map_mode; |
12667 | if (priority) |
12668 | *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : |
12669 | vport->nic.kinfo.dscp_prio[dscp]; |
12670 | |
12671 | return 0; |
12672 | } |
12673 | |
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};

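/* Bind the ops table to the PCI IDs this driver serves; the hnae3 core
 * matches probed devices against pdev_id_table to select this algo.
 */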
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

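/* Module entry: create the driver's unbound workqueue for deferred work
 * and register this algo with the hnae3 framework.
 */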
static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

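/* Module exit: unregister from hnae3 (the _prepare call gives the framework
 * a chance to quiesce devices before the final unregister), then tear down
 * the workqueue.
 */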
static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);