/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
	[BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
	{ PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
	{ PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
	{ PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

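/* HWRM commands that, when issued by a VF, the PF driver asks the firmware
 * to forward to it so they can be inspected or serviced on the VF's behalf
 * (a reasonable reading of the "req_snif" name; the forwarding bitmap is
 * built from this table at driver registration time).
 */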
static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

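/* Asynchronous firmware events the driver subscribes to; when the driver
 * registers with the firmware, each event ID below is set in the
 * async-event forwarding bitmap so these events arrive on the completion
 * ring.
 */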
static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}

const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
	.ports = {
		.src = 0,
		.dst = 0,
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_NONE,
			.dst = BNXT_IPV6_MASK_NONE,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v6addrs = {
			.src = BNXT_IPV6_MASK_ALL,
			.dst = BNXT_IPV6_MASK_ALL,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

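/* The helpers below pick the doorbell format by chip generation: pre-P5
 * chips take a 32-bit write of DB_KEY/flag bits OR'ed with the ring index,
 * while P5 and newer take a 64-bit write combining the per-ring db_key64
 * with a DBR_TYPE code and the masked ring index.  A minimal sketch of the
 * run-time dispatch the helpers implement (illustrative only):
 *
 *	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 *		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_NQ |
 *			    DB_RING_IDX(db, idx), db->doorbell);
 *	else
 *		writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), db->doorbell);
 */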
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

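/* TX length hint lookup table, indexed by packet length in 512-byte units
 * (bnxt_start_xmit() does "length >>= 9" before indexing); the result is
 * the TX_BD_FLAGS_LHINT value OR'ed into the first BD's flags.
 */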
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

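/* Packets transmitted through a VF representor carry a METADATA_HW_PORT_MUX
 * dst whose port_id becomes the BD's CFA action, directing the packet to
 * the intended port; ordinary packets use action 0.
 */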
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd, *txbd0;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

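	/* TX push: when the ring is completely empty and the packet fits
	 * within bp->tx_push_thresh with no special lflags, copy the BDs
	 * and packet data straight through the doorbell BAR with the
	 * __iowrite64_copy() below instead of letting the chip DMA them,
	 * trimming latency for small packets.
	 */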
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
		prod = NEXT_TX(prod);
		tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
				    DB_RING_IDX(&txr->tx_db, prod));
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
		u32 hdr_len;

		if (skb->encapsulation) {
			if (udp_gso)
				hdr_len = skb_inner_transport_offset(skb) +
					  sizeof(struct udphdr);
			else
				hdr_len = skb_inner_tcp_all_headers(skb);
		} else if (udp_gso) {
			hdr_len = skb_transport_offset(skb) +
				  sizeof(struct udphdr);
		} else {
			hdr_len = skb_tcp_all_headers(skb);
		}

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	txbd0 = txbd;
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		bnxt_txr_db_kick(bp, txr, prod);
	} else {
		if (free_size >= bp->tx_wake_thresh)
			txbd0->tx_bd_len_flags_type |=
				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
		txr->kick_pending = 1;
	}

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push) {
			txbd0->tx_bd_len_flags_type &=
				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
			bnxt_txr_db_kick(bp, txr, prod);
		}

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

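/* Reclaim TX descriptors from the software consumer index up to the
 * hardware consumer index reported by the completion (tx_hw_cons): unmap
 * head and fragment DMA, let the PTP worker take over timestamped skbs,
 * free the rest, and wake the queue once enough descriptors are free.
 */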
static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int budget)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	struct pci_dev *pdev = bp->pdev;
	u16 hw_cons = txr->tx_hw_cons;
	unsigned int tx_bytes = 0;
	u16 cons = txr->tx_cons;
	int tx_pkts = 0;

	while (RING_TX(bp, cons) != hw_cons) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, cons);
			return;
		}

		tx_pkts++;
		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (BNXT_CHIP_P5(bp)) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr;
	int i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
			__bnxt_tx_int(bp, txr, budget);
	}
	bnapi->events &= ~BNXT_TX_CMP_EVENT;
}

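/* Page-mode RX buffers come from a page_pool.  When the system page size
 * exceeds the hardware RX page size, a fragment of a pooled page is handed
 * out and *offset records its position within the page; otherwise a whole
 * page is used.  The pool has already DMA-mapped the page.
 */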
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt *bp = rxr->bnapi->bp;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

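/* Error-path helper: return the aggregation buffers referenced by
 * completion entries [start, start + agg_bufs) to the aggregation ring at
 * the current producer index, so the hardware can reuse them without any
 * fresh allocation.
 */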
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

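/* Small packets (up to bp->rx_copy_thresh bytes, BNXT_RX_COPY_THRESH by
 * default) are copied into a freshly allocated skb so the original RX
 * buffer stays mapped and is reused in place.
 */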
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	tpa_info->vlan_valid = 0;
	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		tpa_info->vlan_valid = 1;
		tpa_info->metadata =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	}
}

static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->vlan_valid = 0;
	if (TPA_START_VLAN_VALID(tpa_start)) {
		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
		u32 vlan_proto = ETH_P_8021Q;

		tpa_info->vlan_valid = 1;
		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
			vlan_proto = ETH_P_8021AD;
		tpa_info->metadata = vlan_proto << 16 |
				     TPA_START_METADATA0_TCI(tpa_start1);
	}
}

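/* TPA start: the chip has begun coalescing a flow (hardware GRO/LRO).
 * Park the buffer at the completion's consumer index in tpa_info for the
 * duration of the aggregation, and recycle the buffer previously parked
 * there back onto the RX ring so the ring stays fully populated.
 */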
1412 | static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
1413 | u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, |
1414 | struct rx_tpa_start_cmp_ext *tpa_start1) |
1415 | { |
1416 | struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; |
1417 | struct bnxt_tpa_info *tpa_info; |
1418 | u16 cons, prod, agg_id; |
1419 | struct rx_bd *prod_bd; |
1420 | dma_addr_t mapping; |
1421 | |
1422 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
1423 | agg_id = TPA_START_AGG_ID_P5(tpa_start); |
1424 | agg_id = bnxt_alloc_agg_idx(rxr, agg_id); |
1425 | } else { |
1426 | agg_id = TPA_START_AGG_ID(tpa_start); |
1427 | } |
1428 | cons = tpa_start->rx_tpa_start_cmp_opaque; |
1429 | prod = rxr->rx_prod; |
1430 | cons_rx_buf = &rxr->rx_buf_ring[cons]; |
1431 | prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; |
1432 | tpa_info = &rxr->rx_tpa[agg_id]; |
1433 | |
1434 | if (unlikely(cons != rxr->rx_next_cons || |
1435 | TPA_START_ERROR(tpa_start))) { |
1436 | netdev_warn(dev: bp->dev, format: "TPA cons %x, expected cons %x, error code %x\n" , |
1437 | cons, rxr->rx_next_cons, |
1438 | TPA_START_ERROR_CODE(tpa_start1)); |
1439 | bnxt_sched_reset_rxr(bp, rxr); |
1440 | return; |
1441 | } |
1442 | prod_rx_buf->data = tpa_info->data; |
1443 | prod_rx_buf->data_ptr = tpa_info->data_ptr; |
1444 | |
1445 | mapping = tpa_info->mapping; |
1446 | prod_rx_buf->mapping = mapping; |
1447 | |
1448 | prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; |
1449 | |
1450 | prod_bd->rx_bd_haddr = cpu_to_le64(mapping); |
1451 | |
1452 | tpa_info->data = cons_rx_buf->data; |
1453 | tpa_info->data_ptr = cons_rx_buf->data_ptr; |
1454 | cons_rx_buf->data = NULL; |
1455 | tpa_info->mapping = cons_rx_buf->mapping; |
1456 | |
1457 | tpa_info->len = |
1458 | le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> |
1459 | RX_TPA_START_CMP_LEN_SHIFT; |
1460 | if (likely(TPA_START_HASH_VALID(tpa_start))) { |
1461 | tpa_info->hash_type = PKT_HASH_TYPE_L4; |
1462 | tpa_info->gso_type = SKB_GSO_TCPV4; |
1463 | if (TPA_START_IS_IPV6(tpa_start1)) |
1464 | tpa_info->gso_type = SKB_GSO_TCPV6; |
1465 | /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ |
1466 | else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && |
1467 | TPA_START_HASH_TYPE(tpa_start) == 3) |
1468 | tpa_info->gso_type = SKB_GSO_TCPV6; |
1469 | tpa_info->rss_hash = |
1470 | le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); |
1471 | } else { |
1472 | tpa_info->hash_type = PKT_HASH_TYPE_NONE; |
1473 | tpa_info->gso_type = 0; |
1474 | netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n" ); |
1475 | } |
1476 | tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); |
1477 | tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); |
1478 | if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) |
1479 | bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); |
1480 | else |
1481 | bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); |
1482 | tpa_info->agg_count = 0; |
1483 | |
1484 | rxr->rx_prod = NEXT_RX(prod); |
1485 | cons = RING_RX(bp, NEXT_RX(cons)); |
1486 | rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); |
1487 | cons_rx_buf = &rxr->rx_buf_ring[cons]; |
1488 | |
1489 | bnxt_reuse_rx_data(rxr, cons, data: cons_rx_buf->data); |
1490 | rxr->rx_prod = NEXT_RX(rxr->rx_prod); |
1491 | cons_rx_buf->data = NULL; |
1492 | } |
1493 | |
1494 | static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) |
1495 | { |
1496 | if (agg_bufs) |
1497 | bnxt_reuse_rx_agg_bufs(cpr, idx, start: 0, agg_bufs, tpa: true); |
1498 | } |
1499 | |
1500 | #ifdef CONFIG_INET |
1501 | static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) |
1502 | { |
1503 | struct udphdr *uh = NULL; |
1504 | |
1505 | if (ip_proto == htons(ETH_P_IP)) { |
1506 | struct iphdr *iph = (struct iphdr *)skb->data; |
1507 | |
1508 | if (iph->protocol == IPPROTO_UDP) |
1509 | uh = (struct udphdr *)(iph + 1); |
1510 | } else { |
1511 | struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; |
1512 | |
1513 | if (iph->nexthdr == IPPROTO_UDP) |
1514 | uh = (struct udphdr *)(iph + 1); |
1515 | } |
1516 | if (uh) { |
1517 | if (uh->check) |
1518 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; |
1519 | else |
1520 | skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; |
1521 | } |
1522 | } |
1523 | #endif |
1524 | |
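/* Set the network/transport header offsets of the aggregated packet
 * and seed the TCP pseudo-header checksum for GRO, using the inner
 * L2/L3 offsets reported in the TPA start completion. Internal
 * loopback packets carry an extra 4 bytes, detected and adjusted
 * for below.
 */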
1525 | static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, |
1526 | int payload_off, int tcp_ts, |
1527 | struct sk_buff *skb) |
1528 | { |
1529 | #ifdef CONFIG_INET |
1530 | struct tcphdr *th; |
1531 | int len, nw_off; |
1532 | u16 outer_ip_off, inner_ip_off, inner_mac_off; |
1533 | u32 hdr_info = tpa_info->hdr_info; |
1534 | bool loopback = false; |
1535 | |
1536 | inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); |
1537 | inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); |
1538 | outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); |
1539 | |
1540 | /* If the packet is an internal loopback packet, the offsets will |
1541 | * have an extra 4 bytes. |
1542 | */ |
1543 | if (inner_mac_off == 4) { |
1544 | loopback = true; |
1545 | } else if (inner_mac_off > 4) { |
1546 | __be16 proto = *((__be16 *)(skb->data + inner_ip_off - |
1547 | ETH_HLEN - 2)); |
1548 | |
/* We only support inner IPv4/IPv6. If we don't see the
1550 | * correct protocol ID, it must be a loopback packet where |
1551 | * the offsets are off by 4. |
1552 | */ |
1553 | if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) |
1554 | loopback = true; |
1555 | } |
1556 | if (loopback) { |
1557 | /* internal loopback packet, subtract all offsets by 4 */ |
1558 | inner_ip_off -= 4; |
1559 | inner_mac_off -= 4; |
1560 | outer_ip_off -= 4; |
1561 | } |
1562 | |
1563 | nw_off = inner_ip_off - ETH_HLEN; |
skb_set_network_header(skb, nw_off);
1565 | if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { |
1566 | struct ipv6hdr *iph = ipv6_hdr(skb); |
1567 | |
skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
} else {
struct iphdr *iph = ip_hdr(skb);

skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1579 | } |
1580 | |
1581 | if (inner_mac_off) { /* tunnel */ |
1582 | __be16 proto = *((__be16 *)(skb->data + outer_ip_off - |
1583 | ETH_HLEN - 2)); |
1584 | |
bnxt_gro_tunnel(skb, proto);
1586 | } |
1587 | #endif |
1588 | return skb; |
1589 | } |
1590 | |
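/* Header fixup variant that only needs to set the network and
 * transport header offsets; the TCP pseudo-header checksum is not
 * seeded here.
 */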
1591 | static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, |
1592 | int payload_off, int tcp_ts, |
1593 | struct sk_buff *skb) |
1594 | { |
1595 | #ifdef CONFIG_INET |
1596 | u16 outer_ip_off, inner_ip_off, inner_mac_off; |
1597 | u32 hdr_info = tpa_info->hdr_info; |
1598 | int iphdr_len, nw_off; |
1599 | |
1600 | inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); |
1601 | inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); |
1602 | outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); |
1603 | |
1604 | nw_off = inner_ip_off - ETH_HLEN; |
skb_set_network_header(skb, nw_off);
iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
sizeof(struct ipv6hdr) : sizeof(struct iphdr);
skb_set_transport_header(skb, nw_off + iphdr_len);
1609 | |
1610 | if (inner_mac_off) { /* tunnel */ |
1611 | __be16 proto = *((__be16 *)(skb->data + outer_ip_off - |
1612 | ETH_HLEN - 2)); |
1613 | |
bnxt_gro_tunnel(skb, proto);
1615 | } |
1616 | #endif |
1617 | return skb; |
1618 | } |
1619 | |
1620 | #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) |
1621 | #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) |
1622 | |
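/* Header fixup variant that derives the header offsets from the
 * payload offset and the fixed IPv4/IPv6 plus TCP header sizes,
 * including the TCP timestamp option if present.
 */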
1623 | static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, |
1624 | int payload_off, int tcp_ts, |
1625 | struct sk_buff *skb) |
1626 | { |
1627 | #ifdef CONFIG_INET |
1628 | struct tcphdr *th; |
1629 | int len, nw_off, tcp_opt_len = 0; |
1630 | |
1631 | if (tcp_ts) |
1632 | tcp_opt_len = 12; |
1633 | |
1634 | if (tpa_info->gso_type == SKB_GSO_TCPV4) { |
1635 | struct iphdr *iph; |
1636 | |
1637 | nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - |
1638 | ETH_HLEN; |
skb_set_network_header(skb, nw_off);
iph = ip_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1645 | } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { |
1646 | struct ipv6hdr *iph; |
1647 | |
1648 | nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - |
1649 | ETH_HLEN; |
skb_set_network_header(skb, nw_off);
iph = ipv6_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1656 | } else { |
1657 | dev_kfree_skb_any(skb); |
1658 | return NULL; |
1659 | } |
1660 | |
1661 | if (nw_off) /* tunnel */ |
bnxt_gro_tunnel(skb, skb->protocol);
1663 | #endif |
1664 | return skb; |
1665 | } |
1666 | |
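/* Prepare an aggregated TPA packet for GRO: set the segment count
 * and gso_size from the TPA end completion, fix up the headers with
 * the chip-specific gro_func, then call tcp_gro_complete(). Single
 * segment packets are passed through unchanged.
 */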
1667 | static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, |
1668 | struct bnxt_tpa_info *tpa_info, |
1669 | struct rx_tpa_end_cmp *tpa_end, |
1670 | struct rx_tpa_end_cmp_ext *tpa_end1, |
1671 | struct sk_buff *skb) |
1672 | { |
1673 | #ifdef CONFIG_INET |
1674 | int payload_off; |
1675 | u16 segs; |
1676 | |
1677 | segs = TPA_END_TPA_SEGS(tpa_end); |
1678 | if (segs == 1) |
1679 | return skb; |
1680 | |
1681 | NAPI_GRO_CB(skb)->count = segs; |
1682 | skb_shinfo(skb)->gso_size = |
1683 | le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); |
1684 | skb_shinfo(skb)->gso_type = tpa_info->gso_type; |
1685 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
1686 | payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); |
1687 | else |
1688 | payload_off = TPA_END_PAYLOAD_OFF(tpa_end); |
1689 | skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); |
1690 | if (likely(skb)) |
1691 | tcp_gro_complete(skb); |
1692 | #endif |
1693 | return skb; |
1694 | } |
1695 | |
/* Given the cfa_code of a received packet, determine which
1697 | * netdev (vf-rep or PF) the packet is destined to. |
1698 | */ |
1699 | static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) |
1700 | { |
1701 | struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); |
1702 | |
/* if vf-rep dev is NULL, the packet must belong to the PF */
1704 | return dev ? dev : bp->dev; |
1705 | } |
1706 | |
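/* Process a TPA end completion. Returns the completed skb, NULL if
 * the packet was aborted or dropped, or ERR_PTR(-EBUSY) if the
 * completion ring does not yet hold all the aggregation entries.
 */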
1707 | static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, |
1708 | struct bnxt_cp_ring_info *cpr, |
1709 | u32 *raw_cons, |
1710 | struct rx_tpa_end_cmp *tpa_end, |
1711 | struct rx_tpa_end_cmp_ext *tpa_end1, |
1712 | u8 *event) |
1713 | { |
1714 | struct bnxt_napi *bnapi = cpr->bnapi; |
1715 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
1716 | struct net_device *dev = bp->dev; |
1717 | u8 *data_ptr, agg_bufs; |
1718 | unsigned int len; |
1719 | struct bnxt_tpa_info *tpa_info; |
1720 | dma_addr_t mapping; |
1721 | struct sk_buff *skb; |
1722 | u16 idx = 0, agg_id; |
1723 | void *data; |
1724 | bool gro; |
1725 | |
1726 | if (unlikely(bnapi->in_reset)) { |
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

if (rc < 0)
return ERR_PTR(-EBUSY);
1731 | return NULL; |
1732 | } |
1733 | |
1734 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
1735 | agg_id = TPA_END_AGG_ID_P5(tpa_end); |
1736 | agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
1737 | agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); |
1738 | tpa_info = &rxr->rx_tpa[agg_id]; |
1739 | if (unlikely(agg_bufs != tpa_info->agg_count)) { |
netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1741 | agg_bufs, tpa_info->agg_count); |
1742 | agg_bufs = tpa_info->agg_count; |
1743 | } |
1744 | tpa_info->agg_count = 0; |
1745 | *event |= BNXT_AGG_EVENT; |
bnxt_free_agg_idx(rxr, agg_id);
1747 | idx = agg_id; |
1748 | gro = !!(bp->flags & BNXT_FLAG_GRO); |
1749 | } else { |
1750 | agg_id = TPA_END_AGG_ID(tpa_end); |
1751 | agg_bufs = TPA_END_AGG_BUFS(tpa_end); |
1752 | tpa_info = &rxr->rx_tpa[agg_id]; |
1753 | idx = RING_CMP(*raw_cons); |
1754 | if (agg_bufs) { |
1755 | if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) |
return ERR_PTR(-EBUSY);
1757 | |
1758 | *event |= BNXT_AGG_EVENT; |
1759 | idx = NEXT_CMP(idx); |
1760 | } |
1761 | gro = !!TPA_END_GRO(tpa_end); |
1762 | } |
1763 | data = tpa_info->data; |
1764 | data_ptr = tpa_info->data_ptr; |
1765 | prefetch(data_ptr); |
1766 | len = tpa_info->len; |
1767 | mapping = tpa_info->mapping; |
1768 | |
1769 | if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { |
1770 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1771 | if (agg_bufs > MAX_SKB_FRAGS) |
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1773 | agg_bufs, (int)MAX_SKB_FRAGS); |
1774 | return NULL; |
1775 | } |
1776 | |
1777 | if (len <= bp->rx_copy_thresh) { |
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1779 | if (!skb) { |
1780 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1781 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1782 | return NULL; |
1783 | } |
1784 | } else { |
1785 | u8 *new_data; |
1786 | dma_addr_t new_mapping; |
1787 | |
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1789 | if (!new_data) { |
1790 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1791 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1792 | return NULL; |
1793 | } |
1794 | |
1795 | tpa_info->data = new_data; |
1796 | tpa_info->data_ptr = new_data + bp->rx_offset; |
1797 | tpa_info->mapping = new_mapping; |
1798 | |
skb = napi_build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
1803 | |
1804 | if (!skb) { |
skb_free_frag(data);
1806 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1807 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1808 | return NULL; |
1809 | } |
skb_reserve(skb, bp->rx_offset);
1811 | skb_put(skb, len); |
1812 | } |
1813 | |
1814 | if (agg_bufs) { |
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1816 | if (!skb) { |
1817 | /* Page reuse already handled by bnxt_rx_pages(). */ |
1818 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1819 | return NULL; |
1820 | } |
1821 | } |
1822 | |
1823 | if (tpa_info->cfa_code_valid) |
dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
1825 | skb->protocol = eth_type_trans(skb, dev); |
1826 | |
1827 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1829 | |
1830 | if (tpa_info->vlan_valid && |
1831 | (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { |
1832 | __be16 vlan_proto = htons(tpa_info->metadata >> |
1833 | RX_CMP_FLAGS2_METADATA_TPID_SFT); |
1834 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1835 | |
if (eth_type_vlan(vlan_proto)) {
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1838 | } else { |
1839 | dev_kfree_skb(skb); |
1840 | return NULL; |
1841 | } |
1842 | } |
1843 | |
1844 | skb_checksum_none_assert(skb); |
1845 | if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { |
1846 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1847 | skb->csum_level = |
1848 | (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; |
1849 | } |
1850 | |
1851 | if (gro) |
1852 | skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); |
1853 | |
1854 | return skb; |
1855 | } |
1856 | |
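/* Buffer a TPA aggregation completion until the TPA end completion
 * with the same agg_id arrives.
 */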
1857 | static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
1858 | struct rx_agg_cmp *rx_agg) |
1859 | { |
1860 | u16 agg_id = TPA_AGG_AGG_ID(rx_agg); |
1861 | struct bnxt_tpa_info *tpa_info; |
1862 | |
1863 | agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
1864 | tpa_info = &rxr->rx_tpa[agg_id]; |
1865 | BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); |
1866 | tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; |
1867 | } |
1868 | |
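/* Pass a completed rx skb up the stack, diverting it to the vf-rep
 * netdev if it belongs to one.
 */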
1869 | static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, |
1870 | struct sk_buff *skb) |
1871 | { |
1872 | skb_mark_for_recycle(skb); |
1873 | |
1874 | if (skb->dev != bp->dev) { |
1875 | /* this packet belongs to a vf-rep */ |
1876 | bnxt_vf_rep_rx(bp, skb); |
1877 | return; |
1878 | } |
skb_record_rx_queue(skb, bnapi->index);
napi_gro_receive(&bnapi->napi, skb);
1881 | } |
1882 | |
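/* Return true and set *cmpl_ts if the rx completion carries a valid
 * PTP timestamp.
 */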
1883 | static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, |
1884 | struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) |
1885 | { |
1886 | u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); |
1887 | |
1888 | if (BNXT_PTP_RX_TS_VALID(flags)) |
1889 | goto ts_valid; |
1890 | if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) |
1891 | return false; |
1892 | |
1893 | ts_valid: |
1894 | *cmpl_ts = ts; |
1895 | return true; |
1896 | } |
1897 | |
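/* Extract the VLAN TCI and TPID from the rx completion and attach
 * them to the skb. Frees the skb and returns NULL on an unsupported
 * TPID.
 */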
1898 | static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, |
1899 | struct rx_cmp *rxcmp, |
1900 | struct rx_cmp_ext *rxcmp1) |
1901 | { |
1902 | __be16 vlan_proto; |
1903 | u16 vtag; |
1904 | |
1905 | if (cmp_type == CMP_TYPE_RX_L2_CMP) { |
1906 | __le32 flags2 = rxcmp1->rx_cmp_flags2; |
1907 | u32 meta_data; |
1908 | |
1909 | if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) |
1910 | return skb; |
1911 | |
1912 | meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
1913 | vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1914 | vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); |
if (eth_type_vlan(vlan_proto))
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1917 | else |
1918 | goto vlan_err; |
1919 | } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { |
1920 | if (RX_CMP_VLAN_VALID(rxcmp)) { |
1921 | u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); |
1922 | |
1923 | if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) |
1924 | vlan_proto = htons(ETH_P_8021Q); |
1925 | else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) |
1926 | vlan_proto = htons(ETH_P_8021AD); |
1927 | else |
1928 | goto vlan_err; |
1929 | vtag = RX_CMP_METADATA0_TCI(rxcmp1); |
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1931 | } |
1932 | } |
1933 | return skb; |
1934 | vlan_err: |
1935 | dev_kfree_skb(skb); |
1936 | return NULL; |
1937 | } |
1938 | |
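/* Map the RSS extract operation in a V3 rx completion to the
 * corresponding packet hash type.
 */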
static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
struct rx_cmp *rxcmp)
1941 | { |
1942 | u8 ext_op; |
1943 | |
1944 | ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); |
1945 | switch (ext_op) { |
1946 | case EXT_OP_INNER_4: |
1947 | case EXT_OP_OUTER_4: |
1948 | case EXT_OP_INNFL_3: |
1949 | case EXT_OP_OUTFL_3: |
1950 | return PKT_HASH_TYPE_L4; |
1951 | default: |
1952 | return PKT_HASH_TYPE_L3; |
1953 | } |
1954 | } |
1955 | |
1956 | /* returns the following: |
1957 | * 1 - 1 packet successfully received |
1958 | * 0 - successful TPA_START, packet not completed yet |
1959 | * -EBUSY - completion ring does not have all the agg buffers yet |
1960 | * -ENOMEM - packet aborted due to out of memory |
1961 | * -EIO - packet aborted due to hw error indicated in BD |
1962 | */ |
1963 | static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
1964 | u32 *raw_cons, u8 *event) |
1965 | { |
1966 | struct bnxt_napi *bnapi = cpr->bnapi; |
1967 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
1968 | struct net_device *dev = bp->dev; |
1969 | struct rx_cmp *rxcmp; |
1970 | struct rx_cmp_ext *rxcmp1; |
1971 | u32 tmp_raw_cons = *raw_cons; |
1972 | u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); |
1973 | struct bnxt_sw_rx_bd *rx_buf; |
1974 | unsigned int len; |
1975 | u8 *data_ptr, agg_bufs, cmp_type; |
1976 | bool xdp_active = false; |
1977 | dma_addr_t dma_addr; |
1978 | struct sk_buff *skb; |
1979 | struct xdp_buff xdp; |
1980 | u32 flags, misc; |
1981 | u32 cmpl_ts; |
1982 | void *data; |
1983 | int rc = 0; |
1984 | |
1985 | rxcmp = (struct rx_cmp *) |
1986 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
1987 | |
1988 | cmp_type = RX_CMP_TYPE(rxcmp); |
1989 | |
1990 | if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { |
bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1992 | goto next_rx_no_prod_no_len; |
1993 | } |
1994 | |
1995 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); |
1996 | cp_cons = RING_CMP(tmp_raw_cons); |
1997 | rxcmp1 = (struct rx_cmp_ext *) |
1998 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
1999 | |
2000 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
2001 | return -EBUSY; |
2002 | |
2003 | /* The valid test of the entry must be done first before |
2004 | * reading any further. |
2005 | */ |
2006 | dma_rmb(); |
2007 | prod = rxr->rx_prod; |
2008 | |
2009 | if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || |
2010 | cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { |
bnxt_tpa_start(bp, rxr, cmp_type,
(struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
2014 | |
2015 | *event |= BNXT_RX_EVENT; |
2016 | goto next_rx_no_prod_no_len; |
2017 | |
2018 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);

if (IS_ERR(skb))
2024 | return -EBUSY; |
2025 | |
2026 | rc = -ENOMEM; |
2027 | if (likely(skb)) { |
2028 | bnxt_deliver_skb(bp, bnapi, skb); |
2029 | rc = 1; |
2030 | } |
2031 | *event |= BNXT_RX_EVENT; |
2032 | goto next_rx_no_prod_no_len; |
2033 | } |
2034 | |
2035 | cons = rxcmp->rx_cmp_opaque; |
2036 | if (unlikely(cons != rxr->rx_next_cons)) { |
int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
2038 | |
2039 | /* 0xffff is forced error, don't print it */ |
2040 | if (rxr->rx_next_cons != 0xffff) |
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2042 | cons, rxr->rx_next_cons); |
2043 | bnxt_sched_reset_rxr(bp, rxr); |
2044 | if (rc1) |
2045 | return rc1; |
2046 | goto next_rx_no_prod_no_len; |
2047 | } |
2048 | rx_buf = &rxr->rx_buf_ring[cons]; |
2049 | data = rx_buf->data; |
2050 | data_ptr = rx_buf->data_ptr; |
2051 | prefetch(data_ptr); |
2052 | |
2053 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); |
2054 | agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; |
2055 | |
2056 | if (agg_bufs) { |
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
2058 | return -EBUSY; |
2059 | |
2060 | cp_cons = NEXT_CMP(cp_cons); |
2061 | *event |= BNXT_AGG_EVENT; |
2062 | } |
2063 | *event |= BNXT_RX_EVENT; |
2064 | |
2065 | rx_buf->data = NULL; |
2066 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { |
2067 | u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); |
2068 | |
2069 | bnxt_reuse_rx_data(rxr, cons, data); |
2070 | if (agg_bufs) |
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
false);
2073 | |
2074 | rc = -EIO; |
2075 | if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { |
2076 | bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; |
2077 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && |
2078 | !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { |
netdev_warn_once(bp->dev, "RX buffer error %x\n",
2080 | rx_err); |
2081 | bnxt_sched_reset_rxr(bp, rxr); |
2082 | } |
2083 | } |
2084 | goto next_rx_no_len; |
2085 | } |
2086 | |
2087 | flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); |
2088 | len = flags >> RX_CMP_LEN_SHIFT; |
2089 | dma_addr = rx_buf->mapping; |
2090 | |
2091 | if (bnxt_xdp_attached(bp, rxr)) { |
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
2097 | if (!frag_len) { |
2098 | cpr->sw_stats.rx.rx_oom_discards += 1; |
2099 | rc = -ENOMEM; |
2100 | goto next_rx; |
2101 | } |
2102 | } |
2103 | xdp_active = true; |
2104 | } |
2105 | |
2106 | if (xdp_active) { |
if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
2108 | rc = 1; |
2109 | goto next_rx; |
2110 | } |
2111 | } |
2112 | |
2113 | if (len <= bp->rx_copy_thresh) { |
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
2115 | bnxt_reuse_rx_data(rxr, cons, data); |
2116 | if (!skb) { |
2117 | if (agg_bufs) { |
2118 | if (!xdp_active) |
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
agg_bufs, false);
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
2123 | } |
2124 | cpr->sw_stats.rx.rx_oom_discards += 1; |
2125 | rc = -ENOMEM; |
2126 | goto next_rx; |
2127 | } |
2128 | } else { |
2129 | u32 payload; |
2130 | |
2131 | if (rx_buf->data_ptr == data_ptr) |
2132 | payload = misc & RX_CMP_PAYLOAD_OFFSET; |
2133 | else |
2134 | payload = 0; |
2135 | skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, |
2136 | payload | len); |
2137 | if (!skb) { |
2138 | cpr->sw_stats.rx.rx_oom_discards += 1; |
2139 | rc = -ENOMEM; |
2140 | goto next_rx; |
2141 | } |
2142 | } |
2143 | |
2144 | if (agg_bufs) { |
2145 | if (!xdp_active) { |
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
2147 | if (!skb) { |
2148 | cpr->sw_stats.rx.rx_oom_discards += 1; |
2149 | rc = -ENOMEM; |
2150 | goto next_rx; |
2151 | } |
2152 | } else { |
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
2154 | if (!skb) { |
2155 | /* we should be able to free the old skb here */ |
bnxt_xdp_buff_frags_free(rxr, &xdp);
2157 | cpr->sw_stats.rx.rx_oom_discards += 1; |
2158 | rc = -ENOMEM; |
2159 | goto next_rx; |
2160 | } |
2161 | } |
2162 | } |
2163 | |
2164 | if (RX_CMP_HASH_VALID(rxcmp)) { |
2165 | enum pkt_hash_types type; |
2166 | |
2167 | if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { |
2168 | type = bnxt_rss_ext_op(bp, rxcmp); |
2169 | } else { |
2170 | u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); |
2171 | |
2172 | /* RSS profiles 1 and 3 with extract code 0 for inner |
2173 | * 4-tuple |
2174 | */ |
2175 | if (hash_type != 1 && hash_type != 3) |
2176 | type = PKT_HASH_TYPE_L3; |
2177 | else |
2178 | type = PKT_HASH_TYPE_L4; |
2179 | } |
2180 | skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); |
2181 | } |
2182 | |
2183 | if (cmp_type == CMP_TYPE_RX_L2_CMP) |
2184 | dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); |
2185 | skb->protocol = eth_type_trans(skb, dev); |
2186 | |
2187 | if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { |
2188 | skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); |
2189 | if (!skb) |
2190 | goto next_rx; |
2191 | } |
2192 | |
2193 | skb_checksum_none_assert(skb); |
2194 | if (RX_CMP_L4_CS_OK(rxcmp1)) { |
2195 | if (dev->features & NETIF_F_RXCSUM) { |
2196 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2197 | skb->csum_level = RX_CMP_ENCAP(rxcmp1); |
2198 | } |
2199 | } else { |
2200 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { |
2201 | if (dev->features & NETIF_F_RXCSUM) |
2202 | bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; |
2203 | } |
2204 | } |
2205 | |
if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
2207 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
2208 | u64 ns, ts; |
2209 | |
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
2216 | memset(skb_hwtstamps(skb), 0, |
2217 | sizeof(*skb_hwtstamps(skb))); |
2218 | skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); |
2219 | } |
2220 | } |
2221 | } |
2222 | bnxt_deliver_skb(bp, bnapi, skb); |
2223 | rc = 1; |
2224 | |
2225 | next_rx: |
2226 | cpr->rx_packets += 1; |
2227 | cpr->rx_bytes += len; |
2228 | |
2229 | next_rx_no_len: |
2230 | rxr->rx_prod = NEXT_RX(prod); |
2231 | rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); |
2232 | |
2233 | next_rx_no_prod_no_len: |
2234 | *raw_cons = tmp_raw_cons; |
2235 | |
2236 | return rc; |
2237 | } |
2238 | |
2239 | /* In netpoll mode, if we are using a combined completion ring, we need to |
2240 | * discard the rx packets and recycle the buffers. |
2241 | */ |
2242 | static int bnxt_force_rx_discard(struct bnxt *bp, |
2243 | struct bnxt_cp_ring_info *cpr, |
2244 | u32 *raw_cons, u8 *event) |
2245 | { |
2246 | u32 tmp_raw_cons = *raw_cons; |
2247 | struct rx_cmp_ext *rxcmp1; |
2248 | struct rx_cmp *rxcmp; |
2249 | u16 cp_cons; |
2250 | u8 cmp_type; |
2251 | int rc; |
2252 | |
2253 | cp_cons = RING_CMP(tmp_raw_cons); |
2254 | rxcmp = (struct rx_cmp *) |
2255 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2256 | |
2257 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); |
2258 | cp_cons = RING_CMP(tmp_raw_cons); |
2259 | rxcmp1 = (struct rx_cmp_ext *) |
2260 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2261 | |
2262 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
2263 | return -EBUSY; |
2264 | |
2265 | /* The valid test of the entry must be done first before |
2266 | * reading any further. |
2267 | */ |
2268 | dma_rmb(); |
2269 | cmp_type = RX_CMP_TYPE(rxcmp); |
2270 | if (cmp_type == CMP_TYPE_RX_L2_CMP || |
2271 | cmp_type == CMP_TYPE_RX_L2_V3_CMP) { |
2272 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
2273 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); |
2274 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
2275 | struct rx_tpa_end_cmp_ext *tpa_end1; |
2276 | |
2277 | tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; |
2278 | tpa_end1->rx_tpa_end_cmp_errors_v2 |= |
2279 | cpu_to_le32(RX_TPA_END_CMP_ERRORS); |
2280 | } |
2281 | rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); |
2282 | if (rc && rc != -EBUSY) |
2283 | cpr->sw_stats.rx.rx_netpoll_discards += 1; |
2284 | return rc; |
2285 | } |
2286 | |
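/* Read a firmware health register, which may live in PCI config
 * space, in a mapped GRC window, or directly in BAR0/BAR1.
 */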
2287 | u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) |
2288 | { |
2289 | struct bnxt_fw_health *fw_health = bp->fw_health; |
2290 | u32 reg = fw_health->regs[reg_idx]; |
2291 | u32 reg_type, reg_off, val = 0; |
2292 | |
2293 | reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
2294 | reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
2295 | switch (reg_type) { |
2296 | case BNXT_FW_HEALTH_REG_TYPE_CFG: |
pci_read_config_dword(bp->pdev, reg_off, &val);
2298 | break; |
2299 | case BNXT_FW_HEALTH_REG_TYPE_GRC: |
2300 | reg_off = fw_health->mapped_regs[reg_idx]; |
2301 | fallthrough; |
2302 | case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
val = readl(bp->bar0 + reg_off);
break;
case BNXT_FW_HEALTH_REG_TYPE_BAR1:
val = readl(bp->bar1 + reg_off);
2307 | break; |
2308 | } |
2309 | if (reg_idx == BNXT_FW_RESET_INPROG_REG) |
2310 | val &= fw_health->fw_reset_inprog_reg_mask; |
2311 | return val; |
2312 | } |
2313 | |
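/* Find the ring group whose aggregation ring has the given firmware
 * ring ID.
 */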
2314 | static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) |
2315 | { |
2316 | int i; |
2317 | |
2318 | for (i = 0; i < bp->rx_nr_rings; i++) { |
2319 | u16 grp_idx = bp->rx_ring[i].bnapi->index; |
2320 | struct bnxt_ring_grp_info *grp_info; |
2321 | |
2322 | grp_info = &bp->grp_info[grp_idx]; |
2323 | if (grp_info->agg_fw_ring_id == ring_id) |
2324 | return grp_idx; |
2325 | } |
2326 | return INVALID_HW_RING_ID; |
2327 | } |
2328 | |
2329 | static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) |
2330 | { |
2331 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
2332 | |
2333 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) |
2334 | return link_info->force_link_speed2; |
2335 | if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) |
2336 | return link_info->force_pam4_link_speed; |
2337 | return link_info->force_link_speed; |
2338 | } |
2339 | |
2340 | static void bnxt_set_force_speed(struct bnxt_link_info *link_info) |
2341 | { |
2342 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
2343 | |
2344 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
2345 | link_info->req_link_speed = link_info->force_link_speed2; |
2346 | link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; |
2347 | switch (link_info->req_link_speed) { |
2348 | case BNXT_LINK_SPEED_50GB_PAM4: |
2349 | case BNXT_LINK_SPEED_100GB_PAM4: |
2350 | case BNXT_LINK_SPEED_200GB_PAM4: |
2351 | case BNXT_LINK_SPEED_400GB_PAM4: |
2352 | link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; |
2353 | break; |
2354 | case BNXT_LINK_SPEED_100GB_PAM4_112: |
2355 | case BNXT_LINK_SPEED_200GB_PAM4_112: |
2356 | case BNXT_LINK_SPEED_400GB_PAM4_112: |
2357 | link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; |
2358 | break; |
2359 | default: |
2360 | link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; |
2361 | } |
2362 | return; |
2363 | } |
2364 | link_info->req_link_speed = link_info->force_link_speed; |
2365 | link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; |
2366 | if (link_info->force_pam4_link_speed) { |
2367 | link_info->req_link_speed = link_info->force_pam4_link_speed; |
2368 | link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; |
2369 | } |
2370 | } |
2371 | |
2372 | static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) |
2373 | { |
2374 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
2375 | |
2376 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
2377 | link_info->advertising = link_info->auto_link_speeds2; |
2378 | return; |
2379 | } |
2380 | link_info->advertising = link_info->auto_link_speeds; |
2381 | link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; |
2382 | } |
2383 | |
2384 | static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) |
2385 | { |
2386 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
2387 | |
2388 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
2389 | if (link_info->req_link_speed != link_info->force_link_speed2) |
2390 | return true; |
2391 | return false; |
2392 | } |
2393 | if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && |
2394 | link_info->req_link_speed != link_info->force_link_speed) |
2395 | return true; |
2396 | if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && |
2397 | link_info->req_link_speed != link_info->force_pam4_link_speed) |
2398 | return true; |
2399 | return false; |
2400 | } |
2401 | |
2402 | static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) |
2403 | { |
2404 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
2405 | |
2406 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
2407 | if (link_info->advertising != link_info->auto_link_speeds2) |
2408 | return true; |
2409 | return false; |
2410 | } |
2411 | if (link_info->advertising != link_info->auto_link_speeds || |
2412 | link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) |
2413 | return true; |
2414 | return false; |
2415 | } |
2416 | |
2417 | #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ |
2418 | ((data2) & \ |
2419 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) |
2420 | |
2421 | #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ |
2422 | (((data2) & \ |
2423 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ |
2424 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) |
2425 | |
2426 | #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ |
2427 | ((data1) & \ |
2428 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) |
2429 | |
2430 | #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ |
2431 | (((data1) & \ |
2432 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ |
2433 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) |
2434 | |
2435 | /* Return true if the workqueue has to be scheduled */ |
2436 | static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) |
2437 | { |
2438 | u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); |
2439 | |
2440 | switch (err_type) { |
2441 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: |
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
netdev_warn(bp->dev, "Pause Storm detected!\n");
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2450 | break; |
2451 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { |
2452 | u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); |
2453 | char *threshold_type; |
2454 | bool notify = false; |
2455 | char *dir_str; |
2456 | |
2457 | switch (type) { |
2458 | case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: |
threshold_type = "warning";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
threshold_type = "critical";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
threshold_type = "fatal";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
threshold_type = "shutdown";
break;
default:
netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2472 | return false; |
2473 | } |
if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
dir_str = "above";
notify = true;
} else {
dir_str = "below";
}
netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
dir_str, threshold_type);
netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
if (notify) {
bp->thermal_threshold_type = type;
set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2488 | return true; |
2489 | } |
2490 | return false; |
2491 | } |
2492 | default: |
netdev_err(bp->dev, "FW reported unknown error type %u\n",
2494 | err_type); |
2495 | break; |
2496 | } |
2497 | return false; |
2498 | } |
2499 | |
2500 | #define BNXT_GET_EVENT_PORT(data) \ |
2501 | ((data) & \ |
2502 | ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) |
2503 | |
2504 | #define BNXT_EVENT_RING_TYPE(data2) \ |
2505 | ((data2) & \ |
2506 | ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) |
2507 | |
2508 | #define BNXT_EVENT_RING_TYPE_RX(data2) \ |
2509 | (BNXT_EVENT_RING_TYPE(data2) == \ |
2510 | ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) |
2511 | |
2512 | #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ |
2513 | (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ |
2514 | ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) |
2515 | |
2516 | #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ |
2517 | (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ |
2518 | ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) |
2519 | |
2520 | #define BNXT_PHC_BITS 48 |
2521 | |
2522 | static int bnxt_async_event_process(struct bnxt *bp, |
2523 | struct hwrm_async_event_cmpl *cmpl) |
2524 | { |
2525 | u16 event_id = le16_to_cpu(cmpl->event_id); |
2526 | u32 data1 = le32_to_cpu(cmpl->event_data1); |
2527 | u32 data2 = le32_to_cpu(cmpl->event_data2); |
2528 | |
netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2530 | event_id, data1, data2); |
2531 | |
2532 | /* TODO CHIMP_FW: Define event id's for link change, error etc */ |
2533 | switch (event_id) { |
2534 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { |
2535 | struct bnxt_link_info *link_info = &bp->link_info; |
2536 | |
2537 | if (BNXT_VF(bp)) |
2538 | goto async_event_process_exit; |
2539 | |
2540 | /* print unsupported speed warning in forced speed mode only */ |
2541 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && |
2542 | (data1 & 0x20000)) { |
2543 | u16 fw_speed = bnxt_get_force_speed(link_info); |
2544 | u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); |
2545 | |
2546 | if (speed != SPEED_UNKNOWN) |
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2551 | } |
2552 | fallthrough; |
2553 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: |
2554 | case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: |
set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2562 | break; |
2563 | case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { |
2564 | u16 port_id = BNXT_GET_EVENT_PORT(data1); |
2565 | |
2566 | if (BNXT_VF(bp)) |
2567 | break; |
2568 | |
2569 | if (bp->pf.port_id != port_id) |
2570 | break; |
2571 | |
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
if (BNXT_PF(bp))
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
char *type_str = "Solicited";
2582 | |
2583 | if (!bp->fw_health) |
2584 | goto async_event_process_exit; |
2585 | |
2586 | bp->fw_reset_timestamp = jiffies; |
2587 | bp->fw_reset_min_dsecs = cmpl->timestamp_lo; |
2588 | if (!bp->fw_reset_min_dsecs) |
2589 | bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; |
2590 | bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); |
2591 | if (!bp->fw_reset_max_dsecs) |
2592 | bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; |
if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
type_str = "Fatal";
bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
type_str = "Non-fatal";
bp->fw_health->survivals++;
set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
}
netif_warn(bp, hw, bp->dev,
"%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2611 | break; |
2612 | } |
2613 | case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { |
2614 | struct bnxt_fw_health *fw_health = bp->fw_health; |
char *status_desc = "healthy";
2616 | u32 status; |
2617 | |
2618 | if (!fw_health) |
2619 | goto async_event_process_exit; |
2620 | |
2621 | if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { |
2622 | fw_health->enabled = false; |
netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2624 | break; |
2625 | } |
2626 | fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); |
2627 | fw_health->tmr_multiplier = |
2628 | DIV_ROUND_UP(fw_health->polling_dsecs * HZ, |
2629 | bp->current_interval * 10); |
2630 | fw_health->tmr_counter = fw_health->tmr_multiplier; |
2631 | if (!fw_health->enabled) |
2632 | fw_health->last_fw_heartbeat = |
2633 | bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
2634 | fw_health->last_fw_reset_cnt = |
2635 | bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
2636 | status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
2637 | if (status != BNXT_FW_STATUS_HEALTHY) |
status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
"Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
fw_health->primary ? "primary" : "backup", status,
2642 | status_desc, fw_health->last_fw_reset_cnt); |
2643 | if (!fw_health->enabled) { |
2644 | /* Make sure tmr_counter is set and visible to |
2645 | * bnxt_health_check() before setting enabled to true. |
2646 | */ |
2647 | smp_wmb(); |
2648 | fw_health->enabled = true; |
2649 | } |
2650 | goto async_event_process_exit; |
2651 | } |
2652 | case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: |
netif_notice(bp, hw, bp->dev,
"Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2655 | data1, data2); |
2656 | goto async_event_process_exit; |
2657 | case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { |
2658 | struct bnxt_rx_ring_info *rxr; |
2659 | u16 grp_idx; |
2660 | |
2661 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
2662 | goto async_event_process_exit; |
2663 | |
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2665 | BNXT_EVENT_RING_TYPE(data2), data1); |
2666 | if (!BNXT_EVENT_RING_TYPE_RX(data2)) |
2667 | goto async_event_process_exit; |
2668 | |
grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
if (grp_idx == INVALID_HW_RING_ID) {
netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2672 | data1); |
2673 | goto async_event_process_exit; |
2674 | } |
2675 | rxr = bp->bnapi[grp_idx]->rx_ring; |
2676 | bnxt_sched_reset_rxr(bp, rxr); |
2677 | goto async_event_process_exit; |
2678 | } |
2679 | case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { |
2680 | struct bnxt_fw_health *fw_health = bp->fw_health; |
2681 | |
netif_notice(bp, hw, bp->dev,
"Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2684 | data1, data2); |
2685 | if (fw_health) { |
2686 | fw_health->echo_req_data1 = data1; |
2687 | fw_health->echo_req_data2 = data2; |
set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2689 | break; |
2690 | } |
2691 | goto async_event_process_exit; |
2692 | } |
2693 | case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { |
2694 | bnxt_ptp_pps_event(bp, data1, data2); |
2695 | goto async_event_process_exit; |
2696 | } |
2697 | case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { |
2698 | if (bnxt_event_error_report(bp, data1, data2)) |
2699 | break; |
2700 | goto async_event_process_exit; |
2701 | } |
2702 | case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { |
2703 | switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { |
2704 | case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: |
2705 | if (BNXT_PTP_USE_RTC(bp)) { |
2706 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
2707 | u64 ns; |
2708 | |
2709 | if (!ptp) |
2710 | goto async_event_process_exit; |
2711 | |
spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
spin_unlock_bh(&ptp->ptp_lock);
2718 | } |
2719 | break; |
2720 | } |
2721 | goto async_event_process_exit; |
2722 | } |
2723 | case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { |
2724 | u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; |
2725 | |
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2727 | goto async_event_process_exit; |
2728 | } |
2729 | default: |
2730 | goto async_event_process_exit; |
2731 | } |
2732 | __bnxt_queue_sp_work(bp); |
2733 | async_event_process_exit: |
2734 | return 0; |
2735 | } |
2736 | |
2737 | static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) |
2738 | { |
2739 | u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; |
2740 | struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; |
2741 | struct hwrm_fwd_req_cmpl *fwd_req_cmpl = |
2742 | (struct hwrm_fwd_req_cmpl *)txcmp; |
2743 | |
2744 | switch (cmpl_type) { |
2745 | case CMPL_BASE_TYPE_HWRM_DONE: |
2746 | seq_id = le16_to_cpu(h_cmpl->sequence_id); |
hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2748 | break; |
2749 | |
2750 | case CMPL_BASE_TYPE_HWRM_FWD_REQ: |
2751 | vf_id = le16_to_cpu(fwd_req_cmpl->source_id); |
2752 | |
2753 | if ((vf_id < bp->pf.first_vf_id) || |
2754 | (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { |
netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2756 | vf_id); |
2757 | return -EINVAL; |
2758 | } |
2759 | |
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2761 | bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); |
2762 | break; |
2763 | |
2764 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: |
bnxt_async_event_process(bp,
(struct hwrm_async_event_cmpl *)txcmp);
2767 | break; |
2768 | |
2769 | default: |
2770 | break; |
2771 | } |
2772 | |
2773 | return 0; |
2774 | } |
2775 | |
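/* MSI-X handler: count the event and schedule NAPI; all the real
 * work happens in the poll routine.
 */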
2776 | static irqreturn_t bnxt_msix(int irq, void *dev_instance) |
2777 | { |
2778 | struct bnxt_napi *bnapi = dev_instance; |
2779 | struct bnxt *bp = bnapi->bp; |
2780 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2781 | u32 cons = RING_CMP(cpr->cp_raw_cons); |
2782 | |
2783 | cpr->event_ctr++; |
2784 | prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); |
napi_schedule(&bnapi->napi);
2786 | return IRQ_HANDLED; |
2787 | } |
2788 | |
2789 | static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) |
2790 | { |
2791 | u32 raw_cons = cpr->cp_raw_cons; |
2792 | u16 cons = RING_CMP(raw_cons); |
2793 | struct tx_cmp *txcmp; |
2794 | |
2795 | txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
2796 | |
2797 | return TX_CMP_VALID(txcmp, raw_cons); |
2798 | } |
2799 | |
2800 | static irqreturn_t bnxt_inta(int irq, void *dev_instance) |
2801 | { |
2802 | struct bnxt_napi *bnapi = dev_instance; |
2803 | struct bnxt *bp = bnapi->bp; |
2804 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2805 | u32 cons = RING_CMP(cpr->cp_raw_cons); |
2806 | u32 int_status; |
2807 | |
2808 | prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); |
2809 | |
2810 | if (!bnxt_has_work(bp, cpr)) { |
int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2812 | /* return if erroneous interrupt */ |
2813 | if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) |
2814 | return IRQ_NONE; |
2815 | } |
2816 | |
2817 | /* disable ring IRQ */ |
2818 | BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); |
2819 | |
2820 | /* Return here if interrupt is shared and is disabled. */ |
2821 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2822 | return IRQ_HANDLED; |
2823 | |
napi_schedule(&bnapi->napi);
2825 | return IRQ_HANDLED; |
2826 | } |
2827 | |
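/* Service one completion ring: reclaim TX completions, process rx
 * packets up to the NAPI budget, and dispatch HWRM completions.
 */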
2828 | static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
2829 | int budget) |
2830 | { |
2831 | struct bnxt_napi *bnapi = cpr->bnapi; |
2832 | u32 raw_cons = cpr->cp_raw_cons; |
2833 | u32 cons; |
2834 | int rx_pkts = 0; |
2835 | u8 event = 0; |
2836 | struct tx_cmp *txcmp; |
2837 | |
2838 | cpr->has_more_work = 0; |
2839 | cpr->had_work_done = 1; |
2840 | while (1) { |
2841 | u8 cmp_type; |
2842 | int rc; |
2843 | |
2844 | cons = RING_CMP(raw_cons); |
2845 | txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
2846 | |
2847 | if (!TX_CMP_VALID(txcmp, raw_cons)) |
2848 | break; |
2849 | |
2850 | /* The valid test of the entry must be done first before |
2851 | * reading any further. |
2852 | */ |
2853 | dma_rmb(); |
2854 | cmp_type = TX_CMP_TYPE(txcmp); |
2855 | if (cmp_type == CMP_TYPE_TX_L2_CMP || |
2856 | cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { |
2857 | u32 opaque = txcmp->tx_cmp_opaque; |
2858 | struct bnxt_tx_ring_info *txr; |
2859 | u16 tx_freed; |
2860 | |
2861 | txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; |
2862 | event |= BNXT_TX_CMP_EVENT; |
2863 | if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) |
2864 | txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); |
2865 | else |
2866 | txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); |
2867 | tx_freed = (txr->tx_hw_cons - txr->tx_cons) & |
2868 | bp->tx_ring_mask; |
2869 | /* return full budget so NAPI will complete. */ |
2870 | if (unlikely(tx_freed >= bp->tx_wake_thresh)) { |
2871 | rx_pkts = budget; |
2872 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2873 | if (budget) |
2874 | cpr->has_more_work = 1; |
2875 | break; |
2876 | } |
2877 | } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && |
2878 | cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { |
2879 | if (likely(budget)) |
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
2884 | if (likely(rc >= 0)) |
2885 | rx_pkts += rc; |
2886 | /* Increment rx_pkts when rc is -ENOMEM to count towards |
2887 | * the NAPI budget. Otherwise, we may potentially loop |
2888 | * here forever if we consistently cannot allocate |
2889 | * buffers. |
2890 | */ |
2891 | else if (rc == -ENOMEM && budget) |
2892 | rx_pkts++; |
2893 | else if (rc == -EBUSY) /* partial completion */ |
2894 | break; |
2895 | } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || |
2896 | cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || |
2897 | cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { |
2898 | bnxt_hwrm_handler(bp, txcmp); |
2899 | } |
2900 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2901 | |
2902 | if (rx_pkts && rx_pkts == budget) { |
2903 | cpr->has_more_work = 1; |
2904 | break; |
2905 | } |
2906 | } |
2907 | |
2908 | if (event & BNXT_REDIRECT_EVENT) |
2909 | xdp_do_flush(); |
2910 | |
2911 | if (event & BNXT_TX_EVENT) { |
2912 | struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; |
2913 | u16 prod = txr->tx_prod; |
2914 | |
2915 | /* Sync BD data before updating doorbell */ |
2916 | wmb(); |
2917 | |
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2919 | } |
2920 | |
2921 | cpr->cp_raw_cons = raw_cons; |
2922 | bnapi->events |= event; |
2923 | return rx_pkts; |
2924 | } |
2925 | |
2926 | static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, |
2927 | int budget) |
2928 | { |
2929 | if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) |
2930 | bnapi->tx_int(bp, bnapi, budget); |
2931 | |
2932 | if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { |
2933 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2934 | |
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2936 | } |
2937 | if (bnapi->events & BNXT_AGG_EVENT) { |
2938 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2939 | |
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2941 | } |
2942 | bnapi->events &= BNXT_TX_CMP_EVENT; |
2943 | } |
2944 | |
2945 | static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
2946 | int budget) |
2947 | { |
2948 | struct bnxt_napi *bnapi = cpr->bnapi; |
2949 | int rx_pkts; |
2950 | |
2951 | rx_pkts = __bnxt_poll_work(bp, cpr, budget); |
2952 | |
2953 | /* ACK completion ring before freeing tx ring and producing new |
2954 | * buffers in rx/agg rings to prevent overflowing the completion |
2955 | * ring. |
2956 | */ |
bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2958 | |
2959 | __bnxt_poll_work_done(bp, bnapi, budget); |
2960 | return rx_pkts; |
2961 | } |
2962 | |
2963 | static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) |
2964 | { |
2965 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
2966 | struct bnxt *bp = bnapi->bp; |
2967 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2968 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2969 | struct tx_cmp *txcmp; |
2970 | struct rx_cmp_ext *rxcmp1; |
2971 | u32 cp_cons, tmp_raw_cons; |
2972 | u32 raw_cons = cpr->cp_raw_cons; |
2973 | bool flush_xdp = false; |
2974 | u32 rx_pkts = 0; |
2975 | u8 event = 0; |
2976 | |
2977 | while (1) { |
2978 | int rc; |
2979 | |
2980 | cp_cons = RING_CMP(raw_cons); |
2981 | txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2982 | |
2983 | if (!TX_CMP_VALID(txcmp, raw_cons)) |
2984 | break; |
2985 | |
2986 | /* The valid test of the entry must be done first before |
2987 | * reading any further. |
2988 | */ |
2989 | dma_rmb(); |
2990 | if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
2991 | tmp_raw_cons = NEXT_RAW_CMP(raw_cons); |
2992 | cp_cons = RING_CMP(tmp_raw_cons); |
2993 | rxcmp1 = (struct rx_cmp_ext *) |
2994 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2995 | |
2996 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
2997 | break; |
2998 | |
2999 | /* force an error to recycle the buffer */ |
3000 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
3001 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); |
3002 | |
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
3004 | if (likely(rc == -EIO) && budget) |
3005 | rx_pkts++; |
3006 | else if (rc == -EBUSY) /* partial completion */ |
3007 | break; |
3008 | if (event & BNXT_REDIRECT_EVENT) |
3009 | flush_xdp = true; |
3010 | } else if (unlikely(TX_CMP_TYPE(txcmp) == |
3011 | CMPL_BASE_TYPE_HWRM_DONE)) { |
3012 | bnxt_hwrm_handler(bp, txcmp); |
3013 | } else { |
netdev_err(bp->dev,
"Invalid completion received on special ring\n");
3016 | } |
3017 | raw_cons = NEXT_RAW_CMP(raw_cons); |
3018 | |
3019 | if (rx_pkts == budget) |
3020 | break; |
3021 | } |
3022 | |
3023 | cpr->cp_raw_cons = raw_cons; |
3024 | BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); |
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3026 | |
3027 | if (event & BNXT_AGG_EVENT) |
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
3029 | if (flush_xdp) |
3030 | xdp_do_flush(); |
3031 | |
3032 | if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { |
napi_complete_done(napi, rx_pkts);
3034 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
3035 | } |
3036 | return rx_pkts; |
3037 | } |
3038 | |
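/* Main NAPI poll routine for chips with a combined completion ring. */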
3039 | static int bnxt_poll(struct napi_struct *napi, int budget) |
3040 | { |
3041 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
3042 | struct bnxt *bp = bnapi->bp; |
3043 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
3044 | int work_done = 0; |
3045 | |
3046 | if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { |
napi_complete(napi);
3048 | return 0; |
3049 | } |
3050 | while (1) { |
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
3052 | |
3053 | if (work_done >= budget) { |
3054 | if (!budget) |
3055 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
3056 | break; |
3057 | } |
3058 | |
3059 | if (!bnxt_has_work(bp, cpr)) { |
if (napi_complete_done(napi, work_done))
3061 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
3062 | break; |
3063 | } |
3064 | } |
3065 | if (bp->flags & BNXT_FLAG_DIM) { |
3066 | struct dim_sample dim_sample = {}; |
3067 | |
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
net_dim(&cpr->dim, dim_sample);
3073 | } |
3074 | return work_done; |
3075 | } |
3076 | |
3077 | static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) |
3078 | { |
3079 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
3080 | int i, work_done = 0; |
3081 | |
3082 | for (i = 0; i < cpr->cp_ring_count; i++) { |
3083 | struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; |
3084 | |
3085 | if (cpr2->had_nqe_notify) { |
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
3088 | cpr->has_more_work |= cpr2->has_more_work; |
3089 | } |
3090 | } |
3091 | return work_done; |
3092 | } |
3093 | |
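/* Write the final CQ doorbell (DBR_TYPE_CQ or DBR_TYPE_CQ_ARMALL) for
 * each sub-ring that reported work, then finish the deferred per-ring
 * work in __bnxt_poll_work_done().
 */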
3094 | static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, |
3095 | u64 dbr_type, int budget) |
3096 | { |
3097 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
3098 | int i; |
3099 | |
3100 | for (i = 0; i < cpr->cp_ring_count; i++) { |
3101 | struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; |
3102 | struct bnxt_db_info *db; |
3103 | |
3104 | if (cpr2->had_work_done) { |
3105 | u32 tgl = 0; |
3106 | |
3107 | if (dbr_type == DBR_TYPE_CQ_ARMALL) { |
3108 | cpr2->had_nqe_notify = 0; |
3109 | tgl = cpr2->toggle; |
3110 | } |
3111 | db = &cpr2->cp_db; |
			bnxt_writeq(bp,
				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
				    DB_RING_IDX(db, cpr2->cp_raw_cons),
				    db->doorbell);
3116 | cpr2->had_work_done = 0; |
3117 | } |
3118 | } |
3119 | __bnxt_poll_work_done(bp, bnapi, budget); |
3120 | } |
3121 | |
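/* NAPI poll handler for P5+ chips.  Entries on the NQ (notification
 * queue) identify the completion sub-rings that have work; each
 * referenced sub-ring is polled until the budget is exhausted or the
 * NQ is empty.
 */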
3122 | static int bnxt_poll_p5(struct napi_struct *napi, int budget) |
3123 | { |
3124 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
3125 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
3126 | struct bnxt_cp_ring_info *cpr_rx; |
3127 | u32 raw_cons = cpr->cp_raw_cons; |
3128 | struct bnxt *bp = bnapi->bp; |
3129 | struct nqe_cn *nqcmp; |
3130 | int work_done = 0; |
3131 | u32 cons; |
3132 | |
3133 | if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { |
		napi_complete(napi);
3135 | return 0; |
3136 | } |
3137 | if (cpr->has_more_work) { |
3138 | cpr->has_more_work = 0; |
3139 | work_done = __bnxt_poll_cqs(bp, bnapi, budget); |
3140 | } |
3141 | while (1) { |
3142 | u16 type; |
3143 | |
3144 | cons = RING_CMP(raw_cons); |
3145 | nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
3146 | |
3147 | if (!NQ_CMP_VALID(nqcmp, raw_cons)) { |
3148 | if (cpr->has_more_work) |
3149 | break; |
3150 | |
3151 | __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, |
3152 | budget); |
3153 | cpr->cp_raw_cons = raw_cons; |
			if (napi_complete_done(napi, work_done))
3155 | BNXT_DB_NQ_ARM_P5(&cpr->cp_db, |
3156 | cpr->cp_raw_cons); |
3157 | goto poll_done; |
3158 | } |
3159 | |
3160 | /* The valid test of the entry must be done first before |
3161 | * reading any further. |
3162 | */ |
3163 | dma_rmb(); |
3164 | |
3165 | type = le16_to_cpu(nqcmp->type); |
3166 | if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { |
3167 | u32 idx = le32_to_cpu(nqcmp->cq_handle_low); |
3168 | u32 cq_type = BNXT_NQ_HDL_TYPE(idx); |
3169 | struct bnxt_cp_ring_info *cpr2; |
3170 | |
3171 | /* No more budget for RX work */ |
3172 | if (budget && work_done >= budget && |
3173 | cq_type == BNXT_NQ_HDL_TYPE_RX) |
3174 | break; |
3175 | |
3176 | idx = BNXT_NQ_HDL_IDX(idx); |
3177 | cpr2 = &cpr->cp_ring_arr[idx]; |
3178 | cpr2->had_nqe_notify = 1; |
3179 | cpr2->toggle = NQE_CN_TOGGLE(type); |
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		} else {
			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
3185 | } |
3186 | raw_cons = NEXT_RAW_CMP(raw_cons); |
3187 | } |
3188 | __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); |
3189 | if (raw_cons != cpr->cp_raw_cons) { |
3190 | cpr->cp_raw_cons = raw_cons; |
3191 | BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); |
3192 | } |
3193 | poll_done: |
3194 | cpr_rx = &cpr->cp_ring_arr[0]; |
3195 | if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && |
3196 | (bp->flags & BNXT_FLAG_DIM)) { |
3197 | struct dim_sample dim_sample = {}; |
3198 | |
		dim_update_sample(cpr->event_ctr,
				  cpr_rx->rx_packets,
				  cpr_rx->rx_bytes,
				  &dim_sample);
		net_dim(&cpr->dim, dim_sample);
3204 | } |
3205 | return work_done; |
3206 | } |
3207 | |
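/* Unmap and free all pending TX buffers (SKBs, XDP_REDIRECT frames and
 * push buffers) on every TX ring, then reset the BQL state of each
 * queue.
 */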
3208 | static void bnxt_free_tx_skbs(struct bnxt *bp) |
3209 | { |
3210 | int i, max_idx; |
3211 | struct pci_dev *pdev = bp->pdev; |
3212 | |
3213 | if (!bp->tx_ring) |
3214 | return; |
3215 | |
3216 | max_idx = bp->tx_nr_pages * TX_DESC_CNT; |
3217 | for (i = 0; i < bp->tx_nr_rings; i++) { |
3218 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
3219 | int j; |
3220 | |
3221 | if (!txr->tx_buf_ring) |
3222 | continue; |
3223 | |
3224 | for (j = 0; j < max_idx;) { |
3225 | struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; |
3226 | struct sk_buff *skb; |
3227 | int k, last; |
3228 | |
3229 | if (i < bp->tx_nr_rings_xdp && |
3230 | tx_buf->action == XDP_REDIRECT) { |
3231 | dma_unmap_single(&pdev->dev, |
3232 | dma_unmap_addr(tx_buf, mapping), |
3233 | dma_unmap_len(tx_buf, len), |
3234 | DMA_TO_DEVICE); |
				xdp_return_frame(tx_buf->xdpf);
3236 | tx_buf->action = 0; |
3237 | tx_buf->xdpf = NULL; |
3238 | j++; |
3239 | continue; |
3240 | } |
3241 | |
3242 | skb = tx_buf->skb; |
3243 | if (!skb) { |
3244 | j++; |
3245 | continue; |
3246 | } |
3247 | |
3248 | tx_buf->skb = NULL; |
3249 | |
3250 | if (tx_buf->is_push) { |
3251 | dev_kfree_skb(skb); |
3252 | j += 2; |
3253 | continue; |
3254 | } |
3255 | |
3256 | dma_unmap_single(&pdev->dev, |
3257 | dma_unmap_addr(tx_buf, mapping), |
3258 | skb_headlen(skb), |
3259 | DMA_TO_DEVICE); |
3260 | |
3261 | last = tx_buf->nr_frags; |
3262 | j += 2; |
3263 | for (k = 0; k < last; k++, j++) { |
3264 | int ring_idx = j & bp->tx_ring_mask; |
3265 | skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; |
3266 | |
3267 | tx_buf = &txr->tx_buf_ring[ring_idx]; |
3268 | dma_unmap_page( |
3269 | &pdev->dev, |
3270 | dma_unmap_addr(tx_buf, mapping), |
3271 | skb_frag_size(frag), DMA_TO_DEVICE); |
3272 | } |
3273 | dev_kfree_skb(skb); |
3274 | } |
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3276 | } |
3277 | } |
3278 | |
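/* Release all RX buffers of one ring: TPA staging buffers, regular RX
 * buffers and aggregation pages, then clear the TPA aggregation bitmap.
 */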
3279 | static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) |
3280 | { |
3281 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
3282 | struct pci_dev *pdev = bp->pdev; |
3283 | struct bnxt_tpa_idx_map *map; |
3284 | int i, max_idx, max_agg_idx; |
3285 | |
3286 | max_idx = bp->rx_nr_pages * RX_DESC_CNT; |
3287 | max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; |
3288 | if (!rxr->rx_tpa) |
3289 | goto skip_rx_tpa_free; |
3290 | |
3291 | for (i = 0; i < bp->max_tpa; i++) { |
3292 | struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; |
3293 | u8 *data = tpa_info->data; |
3294 | |
3295 | if (!data) |
3296 | continue; |
3297 | |
		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		tpa_info->data = NULL;

		skb_free_frag(data);
3305 | } |
3306 | |
3307 | skip_rx_tpa_free: |
3308 | if (!rxr->rx_buf_ring) |
3309 | goto skip_rx_buf_free; |
3310 | |
3311 | for (i = 0; i < max_idx; i++) { |
3312 | struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; |
3313 | dma_addr_t mapping = rx_buf->mapping; |
3314 | void *data = rx_buf->data; |
3315 | |
3316 | if (!data) |
3317 | continue; |
3318 | |
3319 | rx_buf->data = NULL; |
3320 | if (BNXT_RX_PAGE_MODE(bp)) { |
			page_pool_recycle_direct(rxr->page_pool, data);
		} else {
			dma_unmap_single_attrs(&pdev->dev, mapping,
					       bp->rx_buf_use_size, bp->rx_dir,
					       DMA_ATTR_WEAK_ORDERING);
			skb_free_frag(data);
3327 | } |
3328 | } |
3329 | |
3330 | skip_rx_buf_free: |
3331 | if (!rxr->rx_agg_ring) |
3332 | goto skip_rx_agg_free; |
3333 | |
3334 | for (i = 0; i < max_agg_idx; i++) { |
3335 | struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; |
3336 | struct page *page = rx_agg_buf->page; |
3337 | |
3338 | if (!page) |
3339 | continue; |
3340 | |
3341 | rx_agg_buf->page = NULL; |
3342 | __clear_bit(i, rxr->rx_agg_bmap); |
3343 | |
		page_pool_recycle_direct(rxr->page_pool, page);
3345 | } |
3346 | |
3347 | skip_rx_agg_free: |
3348 | map = rxr->rx_tpa_idx_map; |
3349 | if (map) |
3350 | memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); |
3351 | } |
3352 | |
3353 | static void bnxt_free_rx_skbs(struct bnxt *bp) |
3354 | { |
3355 | int i; |
3356 | |
3357 | if (!bp->rx_ring) |
3358 | return; |
3359 | |
3360 | for (i = 0; i < bp->rx_nr_rings; i++) |
		bnxt_free_one_rx_ring_skbs(bp, i);
3362 | } |
3363 | |
3364 | static void bnxt_free_skbs(struct bnxt *bp) |
3365 | { |
3366 | bnxt_free_tx_skbs(bp); |
3367 | bnxt_free_rx_skbs(bp); |
3368 | } |
3369 | |
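/* Pre-initialize a block of context memory with the required init
 * value, either across the whole block or only at the init offset of
 * each entry.
 */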
3370 | static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) |
3371 | { |
3372 | u8 init_val = ctxm->init_value; |
3373 | u16 offset = ctxm->init_offset; |
3374 | u8 *p2 = p; |
3375 | int i; |
3376 | |
3377 | if (!init_val) |
3378 | return; |
3379 | if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { |
3380 | memset(p, init_val, len); |
3381 | return; |
3382 | } |
3383 | for (i = 0; i < len; i += ctxm->entry_size) |
3384 | *(p2 + i + offset) = init_val; |
3385 | } |
3386 | |
3387 | static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) |
3388 | { |
3389 | struct pci_dev *pdev = bp->pdev; |
3390 | int i; |
3391 | |
3392 | if (!rmem->pg_arr) |
3393 | goto skip_pages; |
3394 | |
3395 | for (i = 0; i < rmem->nr_pages; i++) { |
3396 | if (!rmem->pg_arr[i]) |
3397 | continue; |
3398 | |
		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);
3401 | |
3402 | rmem->pg_arr[i] = NULL; |
3403 | } |
3404 | skip_pages: |
3405 | if (rmem->pg_tbl) { |
3406 | size_t pg_tbl_size = rmem->nr_pages * 8; |
3407 | |
3408 | if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) |
3409 | pg_tbl_size = rmem->page_size; |
		dma_free_coherent(&pdev->dev, pg_tbl_size,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}
	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
3416 | *rmem->vmem = NULL; |
3417 | } |
3418 | } |
3419 | |
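/* Allocate the DMA pages backing a ring and, when the ring spans
 * multiple pages or uses an indirect level, the page table pointing to
 * them with the PTE valid/next-to-last/last bits set as needed.
 */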
3420 | static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) |
3421 | { |
3422 | struct pci_dev *pdev = bp->pdev; |
3423 | u64 valid_bit = 0; |
3424 | int i; |
3425 | |
3426 | if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) |
3427 | valid_bit = PTU_PTE_VALID; |
3428 | if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { |
3429 | size_t pg_tbl_size = rmem->nr_pages * 8; |
3430 | |
3431 | if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) |
3432 | pg_tbl_size = rmem->page_size; |
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
3436 | if (!rmem->pg_tbl) |
3437 | return -ENOMEM; |
3438 | } |
3439 | |
3440 | for (i = 0; i < rmem->nr_pages; i++) { |
		u64 extra_bits = valid_bit;
3442 | |
		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
					  rmem->page_size);
3453 | if (rmem->nr_pages > 1 || rmem->depth > 0) { |
3454 | if (i == rmem->nr_pages - 2 && |
3455 | (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) |
3456 | extra_bits |= PTU_PTE_NEXT_TO_LAST; |
3457 | else if (i == rmem->nr_pages - 1 && |
3458 | (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) |
3459 | extra_bits |= PTU_PTE_LAST; |
3460 | rmem->pg_tbl[i] = |
3461 | cpu_to_le64(rmem->dma_arr[i] | extra_bits); |
3462 | } |
3463 | } |
3464 | |
3465 | if (rmem->vmem_size) { |
		*rmem->vmem = vzalloc(rmem->vmem_size);
3467 | if (!(*rmem->vmem)) |
3468 | return -ENOMEM; |
3469 | } |
3470 | return 0; |
3471 | } |
3472 | |
3473 | static void bnxt_free_tpa_info(struct bnxt *bp) |
3474 | { |
3475 | int i, j; |
3476 | |
3477 | for (i = 0; i < bp->rx_nr_rings; i++) { |
3478 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
3479 | |
		kfree(rxr->rx_tpa_idx_map);
		rxr->rx_tpa_idx_map = NULL;
		if (rxr->rx_tpa) {
			for (j = 0; j < bp->max_tpa; j++) {
				kfree(rxr->rx_tpa[j].agg_arr);
				rxr->rx_tpa[j].agg_arr = NULL;
			}
		}
		kfree(rxr->rx_tpa);
3489 | rxr->rx_tpa = NULL; |
3490 | } |
3491 | } |
3492 | |
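/* Allocate per-ring TPA (hardware GRO/LRO) state.  On P5+ chips the
 * number of concurrent TPA flows is derived from the firmware-reported
 * max_tpa_v2 and each flow needs its own aggregation array.
 */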
3493 | static int bnxt_alloc_tpa_info(struct bnxt *bp) |
3494 | { |
3495 | int i, j; |
3496 | |
3497 | bp->max_tpa = MAX_TPA; |
3498 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
3499 | if (!bp->max_tpa_v2) |
3500 | return 0; |
3501 | bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); |
3502 | } |
3503 | |
3504 | for (i = 0; i < bp->rx_nr_rings; i++) { |
3505 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
3506 | struct rx_agg_cmp *agg; |
3507 | |
		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
				      GFP_KERNEL);
3510 | if (!rxr->rx_tpa) |
3511 | return -ENOMEM; |
3512 | |
3513 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
3514 | continue; |
3515 | for (j = 0; j < bp->max_tpa; j++) { |
			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3517 | if (!agg) |
3518 | return -ENOMEM; |
3519 | rxr->rx_tpa[j].agg_arr = agg; |
3520 | } |
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
3523 | if (!rxr->rx_tpa_idx_map) |
3524 | return -ENOMEM; |
3525 | } |
3526 | return 0; |
3527 | } |
3528 | |
3529 | static void bnxt_free_rx_rings(struct bnxt *bp) |
3530 | { |
3531 | int i; |
3532 | |
3533 | if (!bp->rx_ring) |
3534 | return; |
3535 | |
3536 | bnxt_free_tpa_info(bp); |
3537 | for (i = 0; i < bp->rx_nr_rings; i++) { |
3538 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
3539 | struct bnxt_ring_struct *ring; |
3540 | |
3541 | if (rxr->xdp_prog) |
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		page_pool_destroy(rxr->page_pool);
		rxr->page_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
3558 | } |
3559 | } |
3560 | |
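/* Create the page_pool that supplies aggregation pages (and regular RX
 * buffers in page mode) for one RX ring, tied to the ring's NAPI.
 */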
3561 | static int bnxt_alloc_rx_page_pool(struct bnxt *bp, |
3562 | struct bnxt_rx_ring_info *rxr) |
3563 | { |
3564 | struct page_pool_params pp = { 0 }; |
3565 | |
3566 | pp.pool_size = bp->rx_agg_ring_size; |
3567 | if (BNXT_RX_PAGE_MODE(bp)) |
3568 | pp.pool_size += bp->rx_ring_size; |
	pp.nid = dev_to_node(&bp->pdev->dev);
3570 | pp.napi = &rxr->bnapi->napi; |
3571 | pp.netdev = bp->dev; |
3572 | pp.dev = &bp->pdev->dev; |
3573 | pp.dma_dir = bp->rx_dir; |
3574 | pp.max_len = PAGE_SIZE; |
3575 | pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; |
3576 | |
	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
		int err = PTR_ERR(rxr->page_pool);
3580 | |
3581 | rxr->page_pool = NULL; |
3582 | return err; |
3583 | } |
3584 | return 0; |
3585 | } |
3586 | |
3587 | static int bnxt_alloc_rx_rings(struct bnxt *bp) |
3588 | { |
3589 | int i, rc = 0, agg_rings = 0; |
3590 | |
3591 | if (!bp->rx_ring) |
3592 | return -ENOMEM; |
3593 | |
3594 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
3595 | agg_rings = 1; |
3596 | |
3597 | for (i = 0; i < bp->rx_nr_rings; i++) { |
3598 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
3599 | struct bnxt_ring_struct *ring; |
3600 | |
3601 | ring = &rxr->rx_ring_struct; |
3602 | |
3603 | rc = bnxt_alloc_rx_page_pool(bp, rxr); |
3604 | if (rc) |
3605 | return rc; |
3606 | |
		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3608 | if (rc < 0) |
3609 | return rc; |
3610 | |
		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
						MEM_TYPE_PAGE_POOL,
						rxr->page_pool);
		if (rc) {
			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3616 | return rc; |
3617 | } |
3618 | |
		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3620 | if (rc) |
3621 | return rc; |
3622 | |
3623 | ring->grp_idx = i; |
3624 | if (agg_rings) { |
3625 | u16 mem_size; |
3626 | |
3627 | ring = &rxr->rx_agg_ring_struct; |
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3629 | if (rc) |
3630 | return rc; |
3631 | |
3632 | ring->grp_idx = i; |
3633 | rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; |
3634 | mem_size = rxr->rx_agg_bmap_size / 8; |
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3636 | if (!rxr->rx_agg_bmap) |
3637 | return -ENOMEM; |
3638 | } |
3639 | } |
3640 | if (bp->flags & BNXT_FLAG_TPA) |
3641 | rc = bnxt_alloc_tpa_info(bp); |
3642 | return rc; |
3643 | } |
3644 | |
3645 | static void bnxt_free_tx_rings(struct bnxt *bp) |
3646 | { |
3647 | int i; |
3648 | struct pci_dev *pdev = bp->pdev; |
3649 | |
3650 | if (!bp->tx_ring) |
3651 | return; |
3652 | |
3653 | for (i = 0; i < bp->tx_nr_rings; i++) { |
3654 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
3655 | struct bnxt_ring_struct *ring; |
3656 | |
3657 | if (txr->tx_push) { |
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
3660 | txr->tx_push = NULL; |
3661 | } |
3662 | |
3663 | ring = &txr->tx_ring_struct; |
3664 | |
		bnxt_free_ring(bp, &ring->ring_mem);
3666 | } |
3667 | } |
3668 | |
3669 | #define BNXT_TC_TO_RING_BASE(bp, tc) \ |
3670 | ((tc) * (bp)->tx_nr_rings_per_tc) |
3671 | |
3672 | #define BNXT_RING_TO_TC_OFF(bp, tx) \ |
3673 | ((tx) % (bp)->tx_nr_rings_per_tc) |
3674 | |
3675 | #define BNXT_RING_TO_TC(bp, tx) \ |
3676 | ((tx) / (bp)->tx_nr_rings_per_tc) |
3677 | |
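/* Allocate TX ring memory and, when TX push is enabled, a coherent
 * DMA buffer per ring to back up the pushed BD plus packet data.
 */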
3678 | static int bnxt_alloc_tx_rings(struct bnxt *bp) |
3679 | { |
3680 | int i, j, rc; |
3681 | struct pci_dev *pdev = bp->pdev; |
3682 | |
3683 | bp->tx_push_size = 0; |
3684 | if (bp->tx_push_thresh) { |
3685 | int push_size; |
3686 | |
3687 | push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + |
3688 | bp->tx_push_thresh); |
3689 | |
3690 | if (push_size > 256) { |
3691 | push_size = 0; |
3692 | bp->tx_push_thresh = 0; |
3693 | } |
3694 | |
3695 | bp->tx_push_size = push_size; |
3696 | } |
3697 | |
3698 | for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { |
3699 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
3700 | struct bnxt_ring_struct *ring; |
3701 | u8 qidx; |
3702 | |
3703 | ring = &txr->tx_ring_struct; |
3704 | |
		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3706 | if (rc) |
3707 | return rc; |
3708 | |
3709 | ring->grp_idx = txr->bnapi->index; |
3710 | if (bp->tx_push_size) { |
3711 | dma_addr_t mapping; |
3712 | |
			/* One pre-allocated DMA buffer to back up
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
							  bp->tx_push_size,
							  &txr->tx_push_mapping,
							  GFP_KERNEL);
3720 | |
3721 | if (!txr->tx_push) |
3722 | return -ENOMEM; |
3723 | |
3724 | mapping = txr->tx_push_mapping + |
3725 | sizeof(struct tx_push_bd); |
3726 | txr->data_mapping = cpu_to_le64(mapping); |
3727 | } |
3728 | qidx = bp->tc_to_qidx[j]; |
3729 | ring->queue_id = bp->q_info[qidx].queue_id; |
3730 | spin_lock_init(&txr->xdp_tx_lock); |
3731 | if (i < bp->tx_nr_rings_xdp) |
3732 | continue; |
3733 | if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) |
3734 | j++; |
3735 | } |
3736 | return 0; |
3737 | } |
3738 | |
3739 | static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) |
3740 | { |
3741 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
3742 | |
	kfree(cpr->cp_desc_ring);
	cpr->cp_desc_ring = NULL;
	ring->ring_mem.pg_arr = NULL;
	kfree(cpr->cp_desc_mapping);
3747 | cpr->cp_desc_mapping = NULL; |
3748 | ring->ring_mem.dma_arr = NULL; |
3749 | } |
3750 | |
3751 | static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) |
3752 | { |
	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
	if (!cpr->cp_desc_ring)
		return -ENOMEM;
	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
				       GFP_KERNEL);
3758 | if (!cpr->cp_desc_mapping) |
3759 | return -ENOMEM; |
3760 | return 0; |
3761 | } |
3762 | |
3763 | static void bnxt_free_all_cp_arrays(struct bnxt *bp) |
3764 | { |
3765 | int i; |
3766 | |
3767 | if (!bp->bnapi) |
3768 | return; |
3769 | for (i = 0; i < bp->cp_nr_rings; i++) { |
3770 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
3771 | |
3772 | if (!bnapi) |
3773 | continue; |
		bnxt_free_cp_arrays(&bnapi->cp_ring);
3775 | } |
3776 | } |
3777 | |
3778 | static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) |
3779 | { |
3780 | int i, n = bp->cp_nr_pages; |
3781 | |
3782 | for (i = 0; i < bp->cp_nr_rings; i++) { |
3783 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
3784 | int rc; |
3785 | |
3786 | if (!bnapi) |
3787 | continue; |
		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3789 | if (rc) |
3790 | return rc; |
3791 | } |
3792 | return 0; |
3793 | } |
3794 | |
3795 | static void bnxt_free_cp_rings(struct bnxt *bp) |
3796 | { |
3797 | int i; |
3798 | |
3799 | if (!bp->bnapi) |
3800 | return; |
3801 | |
3802 | for (i = 0; i < bp->cp_nr_rings; i++) { |
3803 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
3804 | struct bnxt_cp_ring_info *cpr; |
3805 | struct bnxt_ring_struct *ring; |
3806 | int j; |
3807 | |
3808 | if (!bnapi) |
3809 | continue; |
3810 | |
3811 | cpr = &bnapi->cp_ring; |
3812 | ring = &cpr->cp_ring_struct; |
3813 | |
		bnxt_free_ring(bp, &ring->ring_mem);
3815 | |
3816 | if (!cpr->cp_ring_arr) |
3817 | continue; |
3818 | |
3819 | for (j = 0; j < cpr->cp_ring_count; j++) { |
3820 | struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; |
3821 | |
3822 | ring = &cpr2->cp_ring_struct; |
			bnxt_free_ring(bp, &ring->ring_mem);
			bnxt_free_cp_arrays(cpr2);
		}
		kfree(cpr->cp_ring_arr);
3827 | cpr->cp_ring_arr = NULL; |
3828 | cpr->cp_ring_count = 0; |
3829 | } |
3830 | } |
3831 | |
3832 | static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, |
3833 | struct bnxt_cp_ring_info *cpr) |
3834 | { |
3835 | struct bnxt_ring_mem_info *rmem; |
3836 | struct bnxt_ring_struct *ring; |
3837 | int rc; |
3838 | |
	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3840 | if (rc) { |
3841 | bnxt_free_cp_arrays(cpr); |
3842 | return -ENOMEM; |
3843 | } |
3844 | ring = &cpr->cp_ring_struct; |
3845 | rmem = &ring->ring_mem; |
3846 | rmem->nr_pages = bp->cp_nr_pages; |
3847 | rmem->page_size = HW_CMPD_RING_SIZE; |
3848 | rmem->pg_arr = (void **)cpr->cp_desc_ring; |
3849 | rmem->dma_arr = cpr->cp_desc_mapping; |
3850 | rmem->flags = BNXT_RMEM_RING_PTE_FLAG; |
3851 | rc = bnxt_alloc_ring(bp, rmem); |
3852 | if (rc) { |
3853 | bnxt_free_ring(bp, rmem); |
3854 | bnxt_free_cp_arrays(cpr); |
3855 | } |
3856 | return rc; |
3857 | } |
3858 | |
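/* Allocate the per-NAPI completion ring (the NQ on P5+ chips) and, on
 * P5+ chips, the completion sub-rings serving its RX and TX rings.
 */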
3859 | static int bnxt_alloc_cp_rings(struct bnxt *bp) |
3860 | { |
3861 | bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); |
3862 | int i, j, rc, ulp_base_vec, ulp_msix; |
3863 | int tcs = bp->num_tc; |
3864 | |
3865 | if (!tcs) |
3866 | tcs = 1; |
3867 | ulp_msix = bnxt_get_ulp_msix_num(bp); |
3868 | ulp_base_vec = bnxt_get_ulp_msix_base(bp); |
3869 | for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { |
3870 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
3871 | struct bnxt_cp_ring_info *cpr, *cpr2; |
3872 | struct bnxt_ring_struct *ring; |
3873 | int cp_count = 0, k; |
3874 | int rx = 0, tx = 0; |
3875 | |
3876 | if (!bnapi) |
3877 | continue; |
3878 | |
3879 | cpr = &bnapi->cp_ring; |
3880 | cpr->bnapi = bnapi; |
3881 | ring = &cpr->cp_ring_struct; |
3882 | |
		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3884 | if (rc) |
3885 | return rc; |
3886 | |
3887 | if (ulp_msix && i >= ulp_base_vec) |
3888 | ring->map_idx = i + ulp_msix; |
3889 | else |
3890 | ring->map_idx = i; |
3891 | |
3892 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
3893 | continue; |
3894 | |
3895 | if (i < bp->rx_nr_rings) { |
3896 | cp_count++; |
3897 | rx = 1; |
3898 | } |
3899 | if (i < bp->tx_nr_rings_xdp) { |
3900 | cp_count++; |
3901 | tx = 1; |
3902 | } else if ((sh && i < bp->tx_nr_rings) || |
3903 | (!sh && i >= bp->rx_nr_rings)) { |
3904 | cp_count += tcs; |
3905 | tx = 1; |
3906 | } |
3907 | |
		cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
					   GFP_KERNEL);
3910 | if (!cpr->cp_ring_arr) |
3911 | return -ENOMEM; |
3912 | cpr->cp_ring_count = cp_count; |
3913 | |
3914 | for (k = 0; k < cp_count; k++) { |
3915 | cpr2 = &cpr->cp_ring_arr[k]; |
			rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
3917 | if (rc) |
3918 | return rc; |
3919 | cpr2->bnapi = bnapi; |
3920 | cpr2->cp_idx = k; |
3921 | if (!k && rx) { |
3922 | bp->rx_ring[i].rx_cpr = cpr2; |
3923 | cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; |
3924 | } else { |
3925 | int n, tc = k - rx; |
3926 | |
3927 | n = BNXT_TC_TO_RING_BASE(bp, tc) + j; |
3928 | bp->tx_ring[n].tx_cpr = cpr2; |
3929 | cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; |
3930 | } |
3931 | } |
3932 | if (tx) |
3933 | j++; |
3934 | } |
3935 | return 0; |
3936 | } |
3937 | |
3938 | static void bnxt_init_ring_struct(struct bnxt *bp) |
3939 | { |
3940 | int i, j; |
3941 | |
3942 | for (i = 0; i < bp->cp_nr_rings; i++) { |
3943 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
3944 | struct bnxt_ring_mem_info *rmem; |
3945 | struct bnxt_cp_ring_info *cpr; |
3946 | struct bnxt_rx_ring_info *rxr; |
3947 | struct bnxt_tx_ring_info *txr; |
3948 | struct bnxt_ring_struct *ring; |
3949 | |
3950 | if (!bnapi) |
3951 | continue; |
3952 | |
3953 | cpr = &bnapi->cp_ring; |
3954 | ring = &cpr->cp_ring_struct; |
3955 | rmem = &ring->ring_mem; |
3956 | rmem->nr_pages = bp->cp_nr_pages; |
3957 | rmem->page_size = HW_CMPD_RING_SIZE; |
3958 | rmem->pg_arr = (void **)cpr->cp_desc_ring; |
3959 | rmem->dma_arr = cpr->cp_desc_mapping; |
3960 | rmem->vmem_size = 0; |
3961 | |
3962 | rxr = bnapi->rx_ring; |
3963 | if (!rxr) |
3964 | goto skip_rx; |
3965 | |
3966 | ring = &rxr->rx_ring_struct; |
3967 | rmem = &ring->ring_mem; |
3968 | rmem->nr_pages = bp->rx_nr_pages; |
3969 | rmem->page_size = HW_RXBD_RING_SIZE; |
3970 | rmem->pg_arr = (void **)rxr->rx_desc_ring; |
3971 | rmem->dma_arr = rxr->rx_desc_mapping; |
3972 | rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; |
3973 | rmem->vmem = (void **)&rxr->rx_buf_ring; |
3974 | |
3975 | ring = &rxr->rx_agg_ring_struct; |
3976 | rmem = &ring->ring_mem; |
3977 | rmem->nr_pages = bp->rx_agg_nr_pages; |
3978 | rmem->page_size = HW_RXBD_RING_SIZE; |
3979 | rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; |
3980 | rmem->dma_arr = rxr->rx_agg_desc_mapping; |
3981 | rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; |
3982 | rmem->vmem = (void **)&rxr->rx_agg_ring; |
3983 | |
3984 | skip_rx: |
3985 | bnxt_for_each_napi_tx(j, bnapi, txr) { |
3986 | ring = &txr->tx_ring_struct; |
3987 | rmem = &ring->ring_mem; |
3988 | rmem->nr_pages = bp->tx_nr_pages; |
3989 | rmem->page_size = HW_TXBD_RING_SIZE; |
3990 | rmem->pg_arr = (void **)txr->tx_desc_ring; |
3991 | rmem->dma_arr = txr->tx_desc_mapping; |
3992 | rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; |
3993 | rmem->vmem = (void **)&txr->tx_buf_ring; |
3994 | } |
3995 | } |
3996 | } |
3997 | |
3998 | static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) |
3999 | { |
4000 | int i; |
4001 | u32 prod; |
4002 | struct rx_bd **rx_buf_ring; |
4003 | |
4004 | rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; |
4005 | for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { |
4006 | int j; |
4007 | struct rx_bd *rxbd; |
4008 | |
4009 | rxbd = rx_buf_ring[i]; |
4010 | if (!rxbd) |
4011 | continue; |
4012 | |
4013 | for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { |
4014 | rxbd->rx_bd_len_flags_type = cpu_to_le32(type); |
4015 | rxbd->rx_bd_opaque = prod; |
4016 | } |
4017 | } |
4018 | } |
4019 | |
4020 | static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) |
4021 | { |
4022 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
4023 | struct net_device *dev = bp->dev; |
4024 | u32 prod; |
4025 | int i; |
4026 | |
4027 | prod = rxr->rx_prod; |
4028 | for (i = 0; i < bp->rx_ring_size; i++) { |
4029 | if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { |
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
4032 | break; |
4033 | } |
4034 | prod = NEXT_RX(prod); |
4035 | } |
4036 | rxr->rx_prod = prod; |
4037 | |
4038 | if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) |
4039 | return 0; |
4040 | |
4041 | prod = rxr->rx_agg_prod; |
4042 | for (i = 0; i < bp->rx_agg_ring_size; i++) { |
4043 | if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { |
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
4046 | break; |
4047 | } |
4048 | prod = NEXT_RX_AGG(prod); |
4049 | } |
4050 | rxr->rx_agg_prod = prod; |
4051 | |
4052 | if (rxr->rx_tpa) { |
4053 | dma_addr_t mapping; |
4054 | u8 *data; |
4055 | |
4056 | for (i = 0; i < bp->max_tpa; i++) { |
			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
4058 | if (!data) |
4059 | return -ENOMEM; |
4060 | |
4061 | rxr->rx_tpa[i].data = data; |
4062 | rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; |
4063 | rxr->rx_tpa[i].mapping = mapping; |
4064 | } |
4065 | } |
4066 | return 0; |
4067 | } |
4068 | |
4069 | static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) |
4070 | { |
4071 | struct bnxt_rx_ring_info *rxr; |
4072 | struct bnxt_ring_struct *ring; |
4073 | u32 type; |
4074 | |
4075 | type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | |
4076 | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; |
4077 | |
4078 | if (NET_IP_ALIGN == 2) |
4079 | type |= RX_BD_FLAGS_SOP; |
4080 | |
4081 | rxr = &bp->rx_ring[ring_nr]; |
4082 | ring = &rxr->rx_ring_struct; |
4083 | bnxt_init_rxbd_pages(ring, type); |
4084 | |
	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
			     &rxr->bnapi->napi);
4087 | |
4088 | if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { |
		bpf_prog_add(bp->xdp_prog, 1);
4090 | rxr->xdp_prog = bp->xdp_prog; |
4091 | } |
4092 | ring->fw_ring_id = INVALID_HW_RING_ID; |
4093 | |
4094 | ring = &rxr->rx_agg_ring_struct; |
4095 | ring->fw_ring_id = INVALID_HW_RING_ID; |
4096 | |
4097 | if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { |
4098 | type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | |
4099 | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; |
4100 | |
4101 | bnxt_init_rxbd_pages(ring, type); |
4102 | } |
4103 | |
4104 | return bnxt_alloc_one_rx_ring(bp, ring_nr); |
4105 | } |
4106 | |
4107 | static void bnxt_init_cp_rings(struct bnxt *bp) |
4108 | { |
4109 | int i, j; |
4110 | |
4111 | for (i = 0; i < bp->cp_nr_rings; i++) { |
4112 | struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; |
4113 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
4114 | |
4115 | ring->fw_ring_id = INVALID_HW_RING_ID; |
4116 | cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; |
4117 | cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; |
4118 | if (!cpr->cp_ring_arr) |
4119 | continue; |
4120 | for (j = 0; j < cpr->cp_ring_count; j++) { |
4121 | struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; |
4122 | |
4123 | ring = &cpr2->cp_ring_struct; |
4124 | ring->fw_ring_id = INVALID_HW_RING_ID; |
4125 | cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; |
4126 | cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; |
4127 | } |
4128 | } |
4129 | } |
4130 | |
4131 | static int bnxt_init_rx_rings(struct bnxt *bp) |
4132 | { |
4133 | int i, rc = 0; |
4134 | |
4135 | if (BNXT_RX_PAGE_MODE(bp)) { |
4136 | bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; |
4137 | bp->rx_dma_offset = XDP_PACKET_HEADROOM; |
4138 | } else { |
4139 | bp->rx_offset = BNXT_RX_OFFSET; |
4140 | bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; |
4141 | } |
4142 | |
4143 | for (i = 0; i < bp->rx_nr_rings; i++) { |
		rc = bnxt_init_one_rx_ring(bp, i);
4145 | if (rc) |
4146 | break; |
4147 | } |
4148 | |
4149 | return rc; |
4150 | } |
4151 | |
4152 | static int bnxt_init_tx_rings(struct bnxt *bp) |
4153 | { |
4154 | u16 i; |
4155 | |
4156 | bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, |
4157 | BNXT_MIN_TX_DESC_CNT); |
4158 | |
4159 | for (i = 0; i < bp->tx_nr_rings; i++) { |
4160 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
4161 | struct bnxt_ring_struct *ring = &txr->tx_ring_struct; |
4162 | |
4163 | ring->fw_ring_id = INVALID_HW_RING_ID; |
4164 | |
4165 | if (i >= bp->tx_nr_rings_xdp) |
			netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
					     NETDEV_QUEUE_TYPE_TX,
					     &txr->bnapi->napi);
4169 | } |
4170 | |
4171 | return 0; |
4172 | } |
4173 | |
4174 | static void bnxt_free_ring_grps(struct bnxt *bp) |
4175 | { |
	kfree(bp->grp_info);
4177 | bp->grp_info = NULL; |
4178 | } |
4179 | |
4180 | static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) |
4181 | { |
4182 | int i; |
4183 | |
4184 | if (irq_re_init) { |
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
4187 | GFP_KERNEL); |
4188 | if (!bp->grp_info) |
4189 | return -ENOMEM; |
4190 | } |
4191 | for (i = 0; i < bp->cp_nr_rings; i++) { |
4192 | if (irq_re_init) |
4193 | bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; |
4194 | bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; |
4195 | bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; |
4196 | bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; |
4197 | bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; |
4198 | } |
4199 | return 0; |
4200 | } |
4201 | |
4202 | static void bnxt_free_vnics(struct bnxt *bp) |
4203 | { |
	kfree(bp->vnic_info);
4205 | bp->vnic_info = NULL; |
4206 | bp->nr_vnics = 0; |
4207 | } |
4208 | |
4209 | static int bnxt_alloc_vnics(struct bnxt *bp) |
4210 | { |
4211 | int num_vnics = 1; |
4212 | |
4213 | #ifdef CONFIG_RFS_ACCEL |
4214 | if (bp->flags & BNXT_FLAG_RFS) { |
4215 | if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) |
4216 | num_vnics++; |
4217 | else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
4218 | num_vnics += bp->rx_nr_rings; |
4219 | } |
4220 | #endif |
4221 | |
4222 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
4223 | num_vnics++; |
4224 | |
	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
4226 | GFP_KERNEL); |
4227 | if (!bp->vnic_info) |
4228 | return -ENOMEM; |
4229 | |
4230 | bp->nr_vnics = num_vnics; |
4231 | return 0; |
4232 | } |
4233 | |
4234 | static void bnxt_init_vnics(struct bnxt *bp) |
4235 | { |
4236 | struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
4237 | int i; |
4238 | |
4239 | for (i = 0; i < bp->nr_vnics; i++) { |
4240 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
4241 | int j; |
4242 | |
4243 | vnic->fw_vnic_id = INVALID_HW_RING_ID; |
4244 | for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) |
4245 | vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; |
4246 | |
4247 | vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; |
4248 | |
4249 | if (bp->vnic_info[i].rss_hash_key) { |
4250 | if (i == BNXT_VNIC_DEFAULT) { |
4251 | u8 *key = (void *)vnic->rss_hash_key; |
4252 | int k; |
4253 | |
4254 | if (!bp->rss_hash_key_valid && |
4255 | !bp->rss_hash_key_updated) { |
					get_random_bytes(bp->rss_hash_key,
4257 | HW_HASH_KEY_SIZE); |
4258 | bp->rss_hash_key_updated = true; |
4259 | } |
4260 | |
4261 | memcpy(vnic->rss_hash_key, bp->rss_hash_key, |
4262 | HW_HASH_KEY_SIZE); |
4263 | |
4264 | if (!bp->rss_hash_key_updated) |
4265 | continue; |
4266 | |
4267 | bp->rss_hash_key_updated = false; |
4268 | bp->rss_hash_key_valid = true; |
4269 | |
4270 | bp->toeplitz_prefix = 0; |
4271 | for (k = 0; k < 8; k++) { |
4272 | bp->toeplitz_prefix <<= 8; |
4273 | bp->toeplitz_prefix |= key[k]; |
4274 | } |
4275 | } else { |
4276 | memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, |
4277 | HW_HASH_KEY_SIZE); |
4278 | } |
4279 | } |
4280 | } |
4281 | } |
4282 | |
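/* Return the number of ring pages needed to hold ring_size descriptors,
 * rounded up to a power of 2.  For example, 1500 descriptors at 64 per
 * page yield 23 + 1 = 24 pages, rounded up to 32.
 */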
4283 | static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) |
4284 | { |
4285 | int pages; |
4286 | |
4287 | pages = ring_size / desc_per_pg; |
4288 | |
4289 | if (!pages) |
4290 | return 1; |
4291 | |
4292 | pages++; |
4293 | |
4294 | while (pages & (pages - 1)) |
4295 | pages++; |
4296 | |
4297 | return pages; |
4298 | } |
4299 | |
4300 | void bnxt_set_tpa_flags(struct bnxt *bp) |
4301 | { |
4302 | bp->flags &= ~BNXT_FLAG_TPA; |
4303 | if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) |
4304 | return; |
4305 | if (bp->dev->features & NETIF_F_LRO) |
4306 | bp->flags |= BNXT_FLAG_LRO; |
4307 | else if (bp->dev->features & NETIF_F_GRO_HW) |
4308 | bp->flags |= BNXT_FLAG_GRO; |
4309 | } |
4310 | |
4311 | /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must |
4312 | * be set on entry. |
4313 | */ |
4314 | void bnxt_set_ring_params(struct bnxt *bp) |
4315 | { |
4316 | u32 ring_size, rx_size, rx_space, max_rx_cmpl; |
4317 | u32 agg_factor = 0, agg_ring_size = 0; |
4318 | |
4319 | /* 8 for CRC and VLAN */ |
4320 | rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); |
4321 | |
4322 | rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + |
4323 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
4324 | |
4325 | bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; |
4326 | ring_size = bp->rx_ring_size; |
4327 | bp->rx_agg_ring_size = 0; |
4328 | bp->rx_agg_nr_pages = 0; |
4329 | |
4330 | if (bp->flags & BNXT_FLAG_TPA) |
4331 | agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); |
4332 | |
4333 | bp->flags &= ~BNXT_FLAG_JUMBO; |
4334 | if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { |
4335 | u32 jumbo_factor; |
4336 | |
4337 | bp->flags |= BNXT_FLAG_JUMBO; |
4338 | jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; |
4339 | if (jumbo_factor > agg_factor) |
4340 | agg_factor = jumbo_factor; |
4341 | } |
4342 | if (agg_factor) { |
4343 | if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { |
4344 | ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; |
			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
4346 | bp->rx_ring_size, ring_size); |
4347 | bp->rx_ring_size = ring_size; |
4348 | } |
4349 | agg_ring_size = ring_size * agg_factor; |
4350 | |
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							      RX_DESC_CNT);
4353 | if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { |
4354 | u32 tmp = agg_ring_size; |
4355 | |
4356 | bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; |
4357 | agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; |
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
4359 | tmp, agg_ring_size); |
4360 | } |
4361 | bp->rx_agg_ring_size = agg_ring_size; |
4362 | bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; |
4363 | |
4364 | if (BNXT_RX_PAGE_MODE(bp)) { |
4365 | rx_space = PAGE_SIZE; |
4366 | rx_size = PAGE_SIZE - |
4367 | ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - |
4368 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
4369 | } else { |
4370 | rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); |
4371 | rx_space = rx_size + NET_SKB_PAD + |
4372 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
4373 | } |
4374 | } |
4375 | |
4376 | bp->rx_buf_use_size = rx_size; |
4377 | bp->rx_buf_size = rx_space; |
4378 | |
4379 | bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); |
4380 | bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; |
4381 | |
4382 | ring_size = bp->tx_ring_size; |
4383 | bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); |
4384 | bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; |
4385 | |
4386 | max_rx_cmpl = bp->rx_ring_size; |
4387 | /* MAX TPA needs to be added because TPA_START completions are |
4388 | * immediately recycled, so the TPA completions are not bound by |
4389 | * the RX ring size. |
4390 | */ |
4391 | if (bp->flags & BNXT_FLAG_TPA) |
4392 | max_rx_cmpl += bp->max_tpa; |
4393 | /* RX and TPA completions are 32-byte, all others are 16-byte */ |
4394 | ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; |
4395 | bp->cp_ring_size = ring_size; |
4396 | |
4397 | bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); |
4398 | if (bp->cp_nr_pages > MAX_CP_PAGES) { |
4399 | bp->cp_nr_pages = MAX_CP_PAGES; |
4400 | bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; |
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4402 | ring_size, bp->cp_ring_size); |
4403 | } |
4404 | bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; |
4405 | bp->cp_ring_mask = bp->cp_bit - 1; |
4406 | } |
4407 | |
4408 | /* Changing allocation mode of RX rings. |
4409 | * TODO: Update when extending xdp_rxq_info to support allocation modes. |
4410 | */ |
4411 | int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) |
4412 | { |
4413 | struct net_device *dev = bp->dev; |
4414 | |
4415 | if (page_mode) { |
4416 | bp->flags &= ~BNXT_FLAG_AGG_RINGS; |
4417 | bp->flags |= BNXT_FLAG_RX_PAGE_MODE; |
4418 | |
4419 | if (bp->xdp_prog->aux->xdp_has_frags) |
4420 | dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); |
4421 | else |
4422 | dev->max_mtu = |
4423 | min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); |
4424 | if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { |
4425 | bp->flags |= BNXT_FLAG_JUMBO; |
4426 | bp->rx_skb_func = bnxt_rx_multi_page_skb; |
4427 | } else { |
4428 | bp->flags |= BNXT_FLAG_NO_AGG_RINGS; |
4429 | bp->rx_skb_func = bnxt_rx_page_skb; |
4430 | } |
4431 | bp->rx_dir = DMA_BIDIRECTIONAL; |
4432 | /* Disable LRO or GRO_HW */ |
4433 | netdev_update_features(dev); |
4434 | } else { |
4435 | dev->max_mtu = bp->max_mtu; |
4436 | bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; |
4437 | bp->rx_dir = DMA_FROM_DEVICE; |
4438 | bp->rx_skb_func = bnxt_rx_skb; |
4439 | } |
4440 | return 0; |
4441 | } |
4442 | |
4443 | static void bnxt_free_vnic_attributes(struct bnxt *bp) |
4444 | { |
4445 | int i; |
4446 | struct bnxt_vnic_info *vnic; |
4447 | struct pci_dev *pdev = bp->pdev; |
4448 | |
4449 | if (!bp->vnic_info) |
4450 | return; |
4451 | |
4452 | for (i = 0; i < bp->nr_vnics; i++) { |
4453 | vnic = &bp->vnic_info[i]; |
4454 | |
		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
4459 | vnic->uc_list = NULL; |
4460 | |
4461 | if (vnic->mc_list) { |
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
4464 | vnic->mc_list = NULL; |
4465 | } |
4466 | |
4467 | if (vnic->rss_table) { |
			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
4471 | vnic->rss_table = NULL; |
4472 | } |
4473 | |
4474 | vnic->rss_hash_key = NULL; |
4475 | vnic->flags = 0; |
4476 | } |
4477 | } |
4478 | |
4479 | static int bnxt_alloc_vnic_attributes(struct bnxt *bp) |
4480 | { |
4481 | int i, rc = 0, size; |
4482 | struct bnxt_vnic_info *vnic; |
4483 | struct pci_dev *pdev = bp->pdev; |
4484 | int max_rings; |
4485 | |
4486 | for (i = 0; i < bp->nr_vnics; i++) { |
4487 | vnic = &bp->vnic_info[i]; |
4488 | |
4489 | if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { |
4490 | int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; |
4491 | |
4492 | if (mem_size > 0) { |
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4494 | if (!vnic->uc_list) { |
4495 | rc = -ENOMEM; |
4496 | goto out; |
4497 | } |
4498 | } |
4499 | } |
4500 | |
4501 | if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { |
4502 | vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; |
4503 | vnic->mc_list = |
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
4508 | if (!vnic->mc_list) { |
4509 | rc = -ENOMEM; |
4510 | goto out; |
4511 | } |
4512 | } |
4513 | |
4514 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
4515 | goto vnic_skip_grps; |
4516 | |
4517 | if (vnic->flags & BNXT_VNIC_RSS_FLAG) |
4518 | max_rings = bp->rx_nr_rings; |
4519 | else |
4520 | max_rings = 1; |
4521 | |
		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4523 | if (!vnic->fw_grp_ids) { |
4524 | rc = -ENOMEM; |
4525 | goto out; |
4526 | } |
4527 | vnic_skip_grps: |
4528 | if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && |
4529 | !(vnic->flags & BNXT_VNIC_RSS_FLAG)) |
4530 | continue; |
4531 | |
4532 | /* Allocate rss table and hash key */ |
4533 | size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); |
4534 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
4535 | size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); |
4536 | |
4537 | vnic->rss_table_size = size + HW_HASH_KEY_SIZE; |
		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
4542 | if (!vnic->rss_table) { |
4543 | rc = -ENOMEM; |
4544 | goto out; |
4545 | } |
4546 | |
4547 | vnic->rss_hash_key = ((void *)vnic->rss_table) + size; |
4548 | vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; |
4549 | } |
4550 | return 0; |
4551 | |
4552 | out: |
4553 | return rc; |
4554 | } |
4555 | |
4556 | static void bnxt_free_hwrm_resources(struct bnxt *bp) |
4557 | { |
4558 | struct bnxt_hwrm_wait_token *token; |
4559 | |
	dma_pool_destroy(bp->hwrm_dma_pool);
4561 | bp->hwrm_dma_pool = NULL; |
4562 | |
4563 | rcu_read_lock(); |
4564 | hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) |
4565 | WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); |
4566 | rcu_read_unlock(); |
4567 | } |
4568 | |
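/* Create the DMA pool used for HWRM request buffers and initialize the
 * list that tracks pending HWRM commands.
 */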
4569 | static int bnxt_alloc_hwrm_resources(struct bnxt *bp) |
4570 | { |
	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
					    BNXT_HWRM_DMA_SIZE,
					    BNXT_HWRM_DMA_ALIGN, 0);
4574 | if (!bp->hwrm_dma_pool) |
4575 | return -ENOMEM; |
4576 | |
4577 | INIT_HLIST_HEAD(&bp->hwrm_pending_list); |
4578 | |
4579 | return 0; |
4580 | } |
4581 | |
4582 | static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) |
4583 | { |
	kfree(stats->hw_masks);
	stats->hw_masks = NULL;
	kfree(stats->sw_stats);
4587 | stats->sw_stats = NULL; |
4588 | if (stats->hw_stats) { |
		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
				  stats->hw_stats_map);
4591 | stats->hw_stats = NULL; |
4592 | } |
4593 | } |
4594 | |
4595 | static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, |
4596 | bool alloc_masks) |
4597 | { |
	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
					     &stats->hw_stats_map, GFP_KERNEL);
4600 | if (!stats->hw_stats) |
4601 | return -ENOMEM; |
4602 | |
	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4604 | if (!stats->sw_stats) |
4605 | goto stats_mem_err; |
4606 | |
4607 | if (alloc_masks) { |
		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4609 | if (!stats->hw_masks) |
4610 | goto stats_mem_err; |
4611 | } |
4612 | return 0; |
4613 | |
4614 | stats_mem_err: |
4615 | bnxt_free_stats_mem(bp, stats); |
4616 | return -ENOMEM; |
4617 | } |
4618 | |
4619 | static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) |
4620 | { |
4621 | int i; |
4622 | |
4623 | for (i = 0; i < count; i++) |
4624 | mask_arr[i] = mask; |
4625 | } |
4626 | |
4627 | static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) |
4628 | { |
4629 | int i; |
4630 | |
4631 | for (i = 0; i < count; i++) |
4632 | mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); |
4633 | } |
4634 | |
4635 | static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, |
4636 | struct bnxt_stats_mem *stats) |
4637 | { |
4638 | struct hwrm_func_qstats_ext_output *resp; |
4639 | struct hwrm_func_qstats_ext_input *req; |
4640 | __le64 *hw_masks; |
4641 | int rc; |
4642 | |
4643 | if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || |
4644 | !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
4645 | return -EOPNOTSUPP; |
4646 | |
4647 | rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); |
4648 | if (rc) |
4649 | return rc; |
4650 | |
4651 | req->fid = cpu_to_le16(0xffff); |
4652 | req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; |
4653 | |
4654 | resp = hwrm_req_hold(bp, req); |
4655 | rc = hwrm_req_send(bp, req); |
4656 | if (!rc) { |
4657 | hw_masks = &resp->rx_ucast_pkts; |
		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4659 | } |
4660 | hwrm_req_drop(bp, req); |
4661 | return rc; |
4662 | } |
4663 | |
4664 | static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); |
4665 | static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); |
4666 | |
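/* Establish the counter rollover masks: query them from the firmware
 * when supported, otherwise fall back to fixed widths (48-bit ring
 * counters on P5+ chips, 40-bit port counters).
 */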
4667 | static void bnxt_init_stats(struct bnxt *bp) |
4668 | { |
4669 | struct bnxt_napi *bnapi = bp->bnapi[0]; |
4670 | struct bnxt_cp_ring_info *cpr; |
4671 | struct bnxt_stats_mem *stats; |
4672 | __le64 *rx_stats, *tx_stats; |
4673 | int rc, rx_count, tx_count; |
4674 | u64 *rx_masks, *tx_masks; |
4675 | u64 mask; |
4676 | u8 flags; |
4677 | |
4678 | cpr = &bnapi->cp_ring; |
4679 | stats = &cpr->stats; |
4680 | rc = bnxt_hwrm_func_qstat_ext(bp, stats); |
4681 | if (rc) { |
4682 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
4683 | mask = (1ULL << 48) - 1; |
4684 | else |
4685 | mask = -1ULL; |
		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4687 | } |
4688 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
4689 | stats = &bp->port_stats; |
4690 | rx_stats = stats->hw_stats; |
4691 | rx_masks = stats->hw_masks; |
4692 | rx_count = sizeof(struct rx_port_stats) / 8; |
4693 | tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
4694 | tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
4695 | tx_count = sizeof(struct tx_port_stats) / 8; |
4696 | |
4697 | flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; |
4698 | rc = bnxt_hwrm_port_qstats(bp, flags); |
4699 | if (rc) { |
4700 | mask = (1ULL << 40) - 1; |
4701 | |
			bnxt_fill_masks(rx_masks, mask, rx_count);
			bnxt_fill_masks(tx_masks, mask, tx_count);
		} else {
			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
			bnxt_hwrm_port_qstats(bp, 0);
4708 | } |
4709 | } |
4710 | if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { |
4711 | stats = &bp->rx_port_stats_ext; |
4712 | rx_stats = stats->hw_stats; |
4713 | rx_masks = stats->hw_masks; |
4714 | rx_count = sizeof(struct rx_port_stats_ext) / 8; |
4715 | stats = &bp->tx_port_stats_ext; |
4716 | tx_stats = stats->hw_stats; |
4717 | tx_masks = stats->hw_masks; |
4718 | tx_count = sizeof(struct tx_port_stats_ext) / 8; |
4719 | |
4720 | flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; |
4721 | rc = bnxt_hwrm_port_qstats_ext(bp, flags); |
4722 | if (rc) { |
4723 | mask = (1ULL << 40) - 1; |
4724 | |
			bnxt_fill_masks(rx_masks, mask, rx_count);
			if (tx_stats)
				bnxt_fill_masks(tx_masks, mask, tx_count);
		} else {
			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
			if (tx_stats)
				bnxt_copy_hw_masks(tx_masks, tx_stats,
						   tx_count);
			bnxt_hwrm_port_qstats_ext(bp, 0);
4734 | } |
4735 | } |
4736 | } |
4737 | |
4738 | static void bnxt_free_port_stats(struct bnxt *bp) |
4739 | { |
4740 | bp->flags &= ~BNXT_FLAG_PORT_STATS; |
4741 | bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; |
4742 | |
	bnxt_free_stats_mem(bp, &bp->port_stats);
	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4746 | } |
4747 | |
4748 | static void bnxt_free_ring_stats(struct bnxt *bp) |
4749 | { |
4750 | int i; |
4751 | |
4752 | if (!bp->bnapi) |
4753 | return; |
4754 | |
4755 | for (i = 0; i < bp->cp_nr_rings; i++) { |
4756 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
4757 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
4758 | |
		bnxt_free_stats_mem(bp, &cpr->stats);
4760 | } |
4761 | } |
4762 | |
4763 | static int bnxt_alloc_stats(struct bnxt *bp) |
4764 | { |
4765 | u32 size, i; |
4766 | int rc; |
4767 | |
4768 | size = bp->hw_ring_stats_size; |
4769 | |
4770 | for (i = 0; i < bp->cp_nr_rings; i++) { |
4771 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
4772 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
4773 | |
4774 | cpr->stats.len = size; |
		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4776 | if (rc) |
4777 | return rc; |
4778 | |
4779 | cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; |
4780 | } |
4781 | |
4782 | if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) |
4783 | return 0; |
4784 | |
4785 | if (bp->port_stats.hw_stats) |
4786 | goto alloc_ext_stats; |
4787 | |
4788 | bp->port_stats.len = BNXT_PORT_STATS_SIZE; |
	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4790 | if (rc) |
4791 | return rc; |
4792 | |
4793 | bp->flags |= BNXT_FLAG_PORT_STATS; |
4794 | |
4795 | alloc_ext_stats: |
4796 | /* Display extended statistics only if FW supports it */ |
4797 | if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) |
4798 | if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) |
4799 | return 0; |
4800 | |
4801 | if (bp->rx_port_stats_ext.hw_stats) |
4802 | goto alloc_tx_ext_stats; |
4803 | |
4804 | bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); |
	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4806 | /* Extended stats are optional */ |
4807 | if (rc) |
4808 | return 0; |
4809 | |
4810 | alloc_tx_ext_stats: |
4811 | if (bp->tx_port_stats_ext.hw_stats) |
4812 | return 0; |
4813 | |
4814 | if (bp->hwrm_spec_code >= 0x10902 || |
4815 | (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { |
4816 | bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); |
		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4818 | /* Extended stats are optional */ |
4819 | if (rc) |
4820 | return 0; |
4821 | } |
4822 | bp->flags |= BNXT_FLAG_PORT_STATS_EXT; |
4823 | return 0; |
4824 | } |
4825 | |
4826 | static void bnxt_clear_ring_indices(struct bnxt *bp) |
4827 | { |
4828 | int i, j; |
4829 | |
4830 | if (!bp->bnapi) |
4831 | return; |
4832 | |
4833 | for (i = 0; i < bp->cp_nr_rings; i++) { |
4834 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
4835 | struct bnxt_cp_ring_info *cpr; |
4836 | struct bnxt_rx_ring_info *rxr; |
4837 | struct bnxt_tx_ring_info *txr; |
4838 | |
4839 | if (!bnapi) |
4840 | continue; |
4841 | |
4842 | cpr = &bnapi->cp_ring; |
4843 | cpr->cp_raw_cons = 0; |
4844 | |
4845 | bnxt_for_each_napi_tx(j, bnapi, txr) { |
4846 | txr->tx_prod = 0; |
4847 | txr->tx_cons = 0; |
4848 | txr->tx_hw_cons = 0; |
4849 | } |
4850 | |
4851 | rxr = bnapi->rx_ring; |
4852 | if (rxr) { |
4853 | rxr->rx_prod = 0; |
4854 | rxr->rx_agg_prod = 0; |
4855 | rxr->rx_sw_agg_prod = 0; |
4856 | rxr->rx_next_cons = 0; |
4857 | } |
4858 | bnapi->events = 0; |
4859 | } |
4860 | } |
4861 | |
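/* User-configured L2 and ntuple filters are also tracked on
 * usr_fltr_list so that they can be retained or cleared independently
 * of filters created automatically by the driver.
 */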
4862 | void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) |
4863 | { |
4864 | u8 type = fltr->type, flags = fltr->flags; |
4865 | |
	INIT_LIST_HEAD(&fltr->list);
	if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
	    (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
		list_add_tail(&fltr->list, &bp->usr_fltr_list);
4870 | } |
4871 | |
4872 | void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) |
4873 | { |
	if (!list_empty(&fltr->list))
		list_del_init(&fltr->list);
4876 | } |
4877 | |
4878 | void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) |
4879 | { |
4880 | struct bnxt_filter_base *usr_fltr, *tmp; |
4881 | |
4882 | list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { |
4883 | if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) |
4884 | continue; |
		bnxt_del_one_usr_fltr(bp, usr_fltr);
4886 | } |
4887 | } |
4888 | |
4889 | static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) |
4890 | { |
hlist_del(&fltr->hash);
bnxt_del_one_usr_fltr(bp, fltr);
if (fltr->flags) {
clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
}
kfree(fltr);
4898 | } |
4899 | |
4900 | static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) |
4901 | { |
4902 | int i; |
4903 | |
/* Called under rtnl_lock with all our NAPIs disabled, so it is
* safe to delete the hash table.
*/
4907 | for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { |
4908 | struct hlist_head *head; |
4909 | struct hlist_node *tmp; |
4910 | struct bnxt_ntuple_filter *fltr; |
4911 | |
4912 | head = &bp->ntp_fltr_hash_tbl[i]; |
4913 | hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { |
bnxt_del_l2_filter(bp, fltr->l2_fltr);
if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
!list_empty(&fltr->base.list)))
continue;
bnxt_del_fltr(bp, &fltr->base);
4919 | } |
4920 | } |
4921 | if (!all) |
4922 | return; |
4923 | |
bitmap_free(bp->ntp_fltr_bmap);
4925 | bp->ntp_fltr_bmap = NULL; |
4926 | bp->ntp_fltr_count = 0; |
4927 | } |
4928 | |
4929 | static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) |
4930 | { |
4931 | int i, rc = 0; |
4932 | |
4933 | if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) |
4934 | return 0; |
4935 | |
4936 | for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) |
4937 | INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); |
4938 | |
4939 | bp->ntp_fltr_count = 0; |
bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
4941 | |
4942 | if (!bp->ntp_fltr_bmap) |
4943 | rc = -ENOMEM; |
4944 | |
4945 | return rc; |
4946 | } |
4947 | |
4948 | static void bnxt_free_l2_filters(struct bnxt *bp, bool all) |
4949 | { |
4950 | int i; |
4951 | |
4952 | for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { |
4953 | struct hlist_head *head; |
4954 | struct hlist_node *tmp; |
4955 | struct bnxt_l2_filter *fltr; |
4956 | |
4957 | head = &bp->l2_fltr_hash_tbl[i]; |
4958 | hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { |
if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
!list_empty(&fltr->base.list)))
continue;
bnxt_del_fltr(bp, &fltr->base);
4963 | } |
4964 | } |
4965 | } |
4966 | |
4967 | static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) |
4968 | { |
4969 | int i; |
4970 | |
4971 | for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) |
4972 | INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); |
get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
4974 | } |
4975 | |
4976 | static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) |
4977 | { |
4978 | bnxt_free_vnic_attributes(bp); |
4979 | bnxt_free_tx_rings(bp); |
4980 | bnxt_free_rx_rings(bp); |
4981 | bnxt_free_cp_rings(bp); |
4982 | bnxt_free_all_cp_arrays(bp); |
bnxt_free_ntp_fltrs(bp, false);
bnxt_free_l2_filters(bp, false);
4985 | if (irq_re_init) { |
4986 | bnxt_free_ring_stats(bp); |
4987 | if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || |
4988 | test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
4989 | bnxt_free_port_stats(bp); |
4990 | bnxt_free_ring_grps(bp); |
4991 | bnxt_free_vnics(bp); |
kfree(bp->tx_ring_map);
bp->tx_ring_map = NULL;
kfree(bp->tx_ring);
bp->tx_ring = NULL;
kfree(bp->rx_ring);
bp->rx_ring = NULL;
kfree(bp->bnapi);
4999 | bp->bnapi = NULL; |
5000 | } else { |
5001 | bnxt_clear_ring_indices(bp); |
5002 | } |
5003 | } |
5004 | |
5005 | static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) |
5006 | { |
5007 | int i, j, rc, size, arr_size; |
5008 | void *bnapi; |
5009 | |
5010 | if (irq_re_init) { |
5011 | /* Allocate bnapi mem pointer array and mem block for |
5012 | * all queues |
5013 | */ |
5014 | arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * |
5015 | bp->cp_nr_rings); |
5016 | size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); |
bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
5018 | if (!bnapi) |
5019 | return -ENOMEM; |
5020 | |
5021 | bp->bnapi = bnapi; |
5022 | bnapi += arr_size; |
5023 | for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { |
5024 | bp->bnapi[i] = bnapi; |
5025 | bp->bnapi[i]->index = i; |
5026 | bp->bnapi[i]->bp = bp; |
5027 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
5028 | struct bnxt_cp_ring_info *cpr = |
5029 | &bp->bnapi[i]->cp_ring; |
5030 | |
5031 | cpr->cp_ring_struct.ring_mem.flags = |
5032 | BNXT_RMEM_RING_PTE_FLAG; |
5033 | } |
5034 | } |
5035 | |
bp->rx_ring = kcalloc(bp->rx_nr_rings,
sizeof(struct bnxt_rx_ring_info),
GFP_KERNEL);
5039 | if (!bp->rx_ring) |
5040 | return -ENOMEM; |
5041 | |
5042 | for (i = 0; i < bp->rx_nr_rings; i++) { |
5043 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
5044 | |
5045 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
5046 | rxr->rx_ring_struct.ring_mem.flags = |
5047 | BNXT_RMEM_RING_PTE_FLAG; |
5048 | rxr->rx_agg_ring_struct.ring_mem.flags = |
5049 | BNXT_RMEM_RING_PTE_FLAG; |
5050 | } else { |
5051 | rxr->rx_cpr = &bp->bnapi[i]->cp_ring; |
5052 | } |
5053 | rxr->bnapi = bp->bnapi[i]; |
5054 | bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; |
5055 | } |
5056 | |
bp->tx_ring = kcalloc(bp->tx_nr_rings,
sizeof(struct bnxt_tx_ring_info),
GFP_KERNEL);
5060 | if (!bp->tx_ring) |
5061 | return -ENOMEM; |
5062 | |
bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
GFP_KERNEL);
5065 | |
5066 | if (!bp->tx_ring_map) |
5067 | return -ENOMEM; |
5068 | |
5069 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
5070 | j = 0; |
5071 | else |
5072 | j = bp->rx_nr_rings; |
5073 | |
5074 | for (i = 0; i < bp->tx_nr_rings; i++) { |
5075 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
5076 | struct bnxt_napi *bnapi2; |
5077 | |
5078 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
5079 | txr->tx_ring_struct.ring_mem.flags = |
5080 | BNXT_RMEM_RING_PTE_FLAG; |
5081 | bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; |
5082 | if (i >= bp->tx_nr_rings_xdp) { |
5083 | int k = j + BNXT_RING_TO_TC_OFF(bp, i); |
5084 | |
5085 | bnapi2 = bp->bnapi[k]; |
5086 | txr->txq_index = i - bp->tx_nr_rings_xdp; |
5087 | txr->tx_napi_idx = |
5088 | BNXT_RING_TO_TC(bp, txr->txq_index); |
5089 | bnapi2->tx_ring[txr->tx_napi_idx] = txr; |
5090 | bnapi2->tx_int = bnxt_tx_int; |
5091 | } else { |
5092 | bnapi2 = bp->bnapi[j]; |
5093 | bnapi2->flags |= BNXT_NAPI_FLAG_XDP; |
5094 | bnapi2->tx_ring[0] = txr; |
5095 | bnapi2->tx_int = bnxt_tx_int_xdp; |
5096 | j++; |
5097 | } |
5098 | txr->bnapi = bnapi2; |
5099 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
5100 | txr->tx_cpr = &bnapi2->cp_ring; |
5101 | } |
5102 | |
5103 | rc = bnxt_alloc_stats(bp); |
5104 | if (rc) |
5105 | goto alloc_mem_err; |
5106 | bnxt_init_stats(bp); |
5107 | |
5108 | rc = bnxt_alloc_ntp_fltrs(bp); |
5109 | if (rc) |
5110 | goto alloc_mem_err; |
5111 | |
5112 | rc = bnxt_alloc_vnics(bp); |
5113 | if (rc) |
5114 | goto alloc_mem_err; |
5115 | } |
5116 | |
5117 | rc = bnxt_alloc_all_cp_arrays(bp); |
5118 | if (rc) |
5119 | goto alloc_mem_err; |
5120 | |
5121 | bnxt_init_ring_struct(bp); |
5122 | |
5123 | rc = bnxt_alloc_rx_rings(bp); |
5124 | if (rc) |
5125 | goto alloc_mem_err; |
5126 | |
5127 | rc = bnxt_alloc_tx_rings(bp); |
5128 | if (rc) |
5129 | goto alloc_mem_err; |
5130 | |
5131 | rc = bnxt_alloc_cp_rings(bp); |
5132 | if (rc) |
5133 | goto alloc_mem_err; |
5134 | |
5135 | bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | |
5136 | BNXT_VNIC_MCAST_FLAG | |
5137 | BNXT_VNIC_UCAST_FLAG; |
5138 | if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) |
5139 | bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= |
5140 | BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; |
5141 | |
5142 | rc = bnxt_alloc_vnic_attributes(bp); |
5143 | if (rc) |
5144 | goto alloc_mem_err; |
5145 | return 0; |
5146 | |
5147 | alloc_mem_err: |
bnxt_free_mem(bp, true);
5149 | return rc; |
5150 | } |
5151 | |
5152 | static void bnxt_disable_int(struct bnxt *bp) |
5153 | { |
5154 | int i; |
5155 | |
5156 | if (!bp->bnapi) |
5157 | return; |
5158 | |
5159 | for (i = 0; i < bp->cp_nr_rings; i++) { |
5160 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
5161 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
5162 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
5163 | |
5164 | if (ring->fw_ring_id != INVALID_HW_RING_ID) |
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5166 | } |
5167 | } |
5168 | |
5169 | static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) |
5170 | { |
5171 | struct bnxt_napi *bnapi = bp->bnapi[n]; |
5172 | struct bnxt_cp_ring_info *cpr; |
5173 | |
5174 | cpr = &bnapi->cp_ring; |
5175 | return cpr->cp_ring_struct.map_idx; |
5176 | } |
5177 | |
5178 | static void bnxt_disable_int_sync(struct bnxt *bp) |
5179 | { |
5180 | int i; |
5181 | |
5182 | if (!bp->irq_tbl) |
5183 | return; |
5184 | |
atomic_inc(&bp->intr_sem);

bnxt_disable_int(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);

synchronize_irq(bp->irq_tbl[map_idx].vector);
5192 | } |
5193 | } |
5194 | |
5195 | static void bnxt_enable_int(struct bnxt *bp) |
5196 | { |
5197 | int i; |
5198 | |
atomic_set(&bp->intr_sem, 0);
5200 | for (i = 0; i < bp->cp_nr_rings; i++) { |
5201 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
5202 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
5203 | |
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
5205 | } |
5206 | } |
5207 | |
5208 | int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, |
5209 | bool async_only) |
5210 | { |
5211 | DECLARE_BITMAP(async_events_bmap, 256); |
5212 | u32 *events = (u32 *)async_events_bmap; |
5213 | struct hwrm_func_drv_rgtr_output *resp; |
5214 | struct hwrm_func_drv_rgtr_input *req; |
5215 | u32 flags; |
5216 | int rc, i; |
5217 | |
5218 | rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); |
5219 | if (rc) |
5220 | return rc; |
5221 | |
5222 | req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | |
5223 | FUNC_DRV_RGTR_REQ_ENABLES_VER | |
5224 | FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); |
5225 | |
5226 | req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); |
5227 | flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; |
5228 | if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) |
5229 | flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; |
5230 | if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) |
5231 | flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | |
5232 | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; |
5233 | req->flags = cpu_to_le32(flags); |
5234 | req->ver_maj_8b = DRV_VER_MAJ; |
5235 | req->ver_min_8b = DRV_VER_MIN; |
5236 | req->ver_upd_8b = DRV_VER_UPD; |
5237 | req->ver_maj = cpu_to_le16(DRV_VER_MAJ); |
5238 | req->ver_min = cpu_to_le16(DRV_VER_MIN); |
5239 | req->ver_upd = cpu_to_le16(DRV_VER_UPD); |
5240 | |
5241 | if (BNXT_PF(bp)) { |
5242 | u32 data[8]; |
5243 | int i; |
5244 | |
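/* Build a 256-bit bitmap of the VF HWRM command ids that firmware
* should forward to the PF driver: command id N sets bit (N % 32)
* of 32-bit word (N / 32).
*/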
5245 | memset(data, 0, sizeof(data)); |
5246 | for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { |
5247 | u16 cmd = bnxt_vf_req_snif[i]; |
5248 | unsigned int bit, idx; |
5249 | |
5250 | idx = cmd / 32; |
5251 | bit = cmd % 32; |
5252 | data[idx] |= 1 << bit; |
5253 | } |
5254 | |
5255 | for (i = 0; i < 8; i++) |
5256 | req->vf_req_fwd[i] = cpu_to_le32(data[i]); |
5257 | |
5258 | req->enables |= |
5259 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); |
5260 | } |
5261 | |
5262 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) |
5263 | req->flags |= cpu_to_le32( |
5264 | FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); |
5265 | |
5266 | memset(async_events_bmap, 0, sizeof(async_events_bmap)); |
5267 | for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { |
5268 | u16 event_id = bnxt_async_events_arr[i]; |
5269 | |
5270 | if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && |
5271 | !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
5272 | continue; |
5273 | if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && |
5274 | !bp->ptp_cfg) |
5275 | continue; |
5276 | __set_bit(bnxt_async_events_arr[i], async_events_bmap); |
5277 | } |
5278 | if (bmap && bmap_size) { |
5279 | for (i = 0; i < bmap_size; i++) { |
5280 | if (test_bit(i, bmap)) |
5281 | __set_bit(i, async_events_bmap); |
5282 | } |
5283 | } |
5284 | for (i = 0; i < 8; i++) |
5285 | req->async_event_fwd[i] |= cpu_to_le32(events[i]); |
5286 | |
5287 | if (async_only) |
5288 | req->enables = |
5289 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); |
5290 | |
5291 | resp = hwrm_req_hold(bp, req); |
5292 | rc = hwrm_req_send(bp, req); |
5293 | if (!rc) { |
set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
5295 | if (resp->flags & |
5296 | cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) |
5297 | bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; |
5298 | } |
5299 | hwrm_req_drop(bp, req); |
5300 | return rc; |
5301 | } |
5302 | |
5303 | int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) |
5304 | { |
5305 | struct hwrm_func_drv_unrgtr_input *req; |
5306 | int rc; |
5307 | |
if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
5309 | return 0; |
5310 | |
5311 | rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); |
5312 | if (rc) |
5313 | return rc; |
5314 | return hwrm_req_send(bp, req); |
5315 | } |
5316 | |
5317 | static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); |
5318 | |
5319 | static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) |
5320 | { |
5321 | struct hwrm_tunnel_dst_port_free_input *req; |
5322 | int rc; |
5323 | |
5324 | if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && |
5325 | bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) |
5326 | return 0; |
5327 | if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && |
5328 | bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) |
5329 | return 0; |
5330 | |
5331 | rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); |
5332 | if (rc) |
5333 | return rc; |
5334 | |
5335 | req->tunnel_type = tunnel_type; |
5336 | |
5337 | switch (tunnel_type) { |
5338 | case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: |
5339 | req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); |
5340 | bp->vxlan_port = 0; |
5341 | bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; |
5342 | break; |
5343 | case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: |
5344 | req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); |
5345 | bp->nge_port = 0; |
5346 | bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; |
5347 | break; |
5348 | case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: |
5349 | req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); |
5350 | bp->vxlan_gpe_port = 0; |
5351 | bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; |
5352 | break; |
5353 | default: |
5354 | break; |
5355 | } |
5356 | |
5357 | rc = hwrm_req_send(bp, req); |
5358 | if (rc) |
5359 | netdev_err(dev: bp->dev, format: "hwrm_tunnel_dst_port_free failed. rc:%d\n" , |
5360 | rc); |
5361 | if (bp->flags & BNXT_FLAG_TPA) |
5362 | bnxt_set_tpa(bp, set_tpa: true); |
5363 | return rc; |
5364 | } |
5365 | |
5366 | static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, |
5367 | u8 tunnel_type) |
5368 | { |
5369 | struct hwrm_tunnel_dst_port_alloc_output *resp; |
5370 | struct hwrm_tunnel_dst_port_alloc_input *req; |
5371 | int rc; |
5372 | |
5373 | rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); |
5374 | if (rc) |
5375 | return rc; |
5376 | |
5377 | req->tunnel_type = tunnel_type; |
5378 | req->tunnel_dst_port_val = port; |
5379 | |
5380 | resp = hwrm_req_hold(bp, req); |
5381 | rc = hwrm_req_send(bp, req); |
5382 | if (rc) { |
5383 | netdev_err(dev: bp->dev, format: "hwrm_tunnel_dst_port_alloc failed. rc:%d\n" , |
5384 | rc); |
5385 | goto err_out; |
5386 | } |
5387 | |
5388 | switch (tunnel_type) { |
5389 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: |
5390 | bp->vxlan_port = port; |
5391 | bp->vxlan_fw_dst_port_id = |
5392 | le16_to_cpu(resp->tunnel_dst_port_id); |
5393 | break; |
5394 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: |
5395 | bp->nge_port = port; |
5396 | bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); |
5397 | break; |
5398 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: |
5399 | bp->vxlan_gpe_port = port; |
5400 | bp->vxlan_gpe_fw_dst_port_id = |
5401 | le16_to_cpu(resp->tunnel_dst_port_id); |
5402 | break; |
5403 | default: |
5404 | break; |
5405 | } |
5406 | if (bp->flags & BNXT_FLAG_TPA) |
bnxt_set_tpa(bp, true);
5408 | |
5409 | err_out: |
5410 | hwrm_req_drop(bp, req); |
5411 | return rc; |
5412 | } |
5413 | |
5414 | static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) |
5415 | { |
5416 | struct hwrm_cfa_l2_set_rx_mask_input *req; |
5417 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5418 | int rc; |
5419 | |
5420 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); |
5421 | if (rc) |
5422 | return rc; |
5423 | |
5424 | req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); |
5425 | if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { |
5426 | req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); |
5427 | req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); |
5428 | } |
5429 | req->mask = cpu_to_le32(vnic->rx_mask); |
5430 | return hwrm_req_send_silent(bp, req); |
5431 | } |
5432 | |
5433 | void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) |
5434 | { |
if (!atomic_dec_and_test(&fltr->refcnt))
return;
spin_lock_bh(&bp->ntp_fltr_lock);
if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return;
}
hlist_del_rcu(&fltr->base.hash);
bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
}
spin_unlock_bh(&bp->ntp_fltr_lock);
5449 | kfree_rcu(fltr, base.rcu); |
5450 | } |
5451 | |
5452 | static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, |
5453 | struct bnxt_l2_key *key, |
5454 | u32 idx) |
5455 | { |
5456 | struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; |
5457 | struct bnxt_l2_filter *fltr; |
5458 | |
5459 | hlist_for_each_entry_rcu(fltr, head, base.hash) { |
5460 | struct bnxt_l2_key *l2_key = &fltr->l2_key; |
5461 | |
if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
5463 | l2_key->vlan == key->vlan) |
5464 | return fltr; |
5465 | } |
5466 | return NULL; |
5467 | } |
5468 | |
5469 | static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, |
5470 | struct bnxt_l2_key *key, |
5471 | u32 idx) |
5472 | { |
5473 | struct bnxt_l2_filter *fltr = NULL; |
5474 | |
5475 | rcu_read_lock(); |
5476 | fltr = __bnxt_lookup_l2_filter(bp, key, idx); |
5477 | if (fltr) |
atomic_inc(&fltr->refcnt);
5479 | rcu_read_unlock(); |
5480 | return fltr; |
5481 | } |
5482 | |
5483 | #define BNXT_IPV4_4TUPLE(bp, fkeys) \ |
5484 | (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ |
5485 | (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ |
5486 | ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ |
5487 | (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) |
5488 | |
5489 | #define BNXT_IPV6_4TUPLE(bp, fkeys) \ |
5490 | (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ |
5491 | (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ |
5492 | ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ |
5493 | (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) |
5494 | |
static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
5496 | { |
5497 | if (fkeys->basic.n_proto == htons(ETH_P_IP)) { |
5498 | if (BNXT_IPV4_4TUPLE(bp, fkeys)) |
5499 | return sizeof(fkeys->addrs.v4addrs) + |
5500 | sizeof(fkeys->ports); |
5501 | |
5502 | if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) |
5503 | return sizeof(fkeys->addrs.v4addrs); |
5504 | } |
5505 | |
5506 | if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { |
5507 | if (BNXT_IPV6_4TUPLE(bp, fkeys)) |
5508 | return sizeof(fkeys->addrs.v6addrs) + |
5509 | sizeof(fkeys->ports); |
5510 | |
5511 | if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) |
5512 | return sizeof(fkeys->addrs.v6addrs); |
5513 | } |
5514 | |
5515 | return 0; |
5516 | } |
5517 | |
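/* Software Toeplitz hash over the flow tuple, intended to mirror the
* RSS hash the NIC computes: keep a 64-bit sliding window of hash-key
* bits, XOR the window into the accumulator for every input bit that
* is set, shift the window left one bit per input bit, and refill its
* low byte from the key after each consumed input byte.
*/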
5518 | static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, |
5519 | const unsigned char *key) |
5520 | { |
5521 | u64 prefix = bp->toeplitz_prefix, hash = 0; |
5522 | struct bnxt_ipv4_tuple tuple4; |
5523 | struct bnxt_ipv6_tuple tuple6; |
5524 | int i, j, len = 0; |
5525 | u8 *four_tuple; |
5526 | |
5527 | len = bnxt_get_rss_flow_tuple_len(bp, fkeys); |
5528 | if (!len) |
5529 | return 0; |
5530 | |
5531 | if (fkeys->basic.n_proto == htons(ETH_P_IP)) { |
5532 | tuple4.v4addrs = fkeys->addrs.v4addrs; |
5533 | tuple4.ports = fkeys->ports; |
5534 | four_tuple = (unsigned char *)&tuple4; |
5535 | } else { |
5536 | tuple6.v6addrs = fkeys->addrs.v6addrs; |
5537 | tuple6.ports = fkeys->ports; |
5538 | four_tuple = (unsigned char *)&tuple6; |
5539 | } |
5540 | |
5541 | for (i = 0, j = 8; i < len; i++, j++) { |
5542 | u8 byte = four_tuple[i]; |
5543 | int bit; |
5544 | |
5545 | for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { |
5546 | if (byte & 0x80) |
5547 | hash ^= prefix; |
5548 | } |
5549 | prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; |
5550 | } |
5551 | |
5552 | /* The valid part of the hash is in the upper 32 bits. */ |
5553 | return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; |
5554 | } |
5555 | |
5556 | #ifdef CONFIG_RFS_ACCEL |
5557 | static struct bnxt_l2_filter * |
5558 | bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) |
5559 | { |
5560 | struct bnxt_l2_filter *fltr; |
5561 | u32 idx; |
5562 | |
idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5564 | BNXT_L2_FLTR_HASH_MASK; |
5565 | fltr = bnxt_lookup_l2_filter(bp, key, idx); |
5566 | return fltr; |
5567 | } |
5568 | #endif |
5569 | |
5570 | static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, |
5571 | struct bnxt_l2_key *key, u32 idx) |
5572 | { |
5573 | struct hlist_head *head; |
5574 | |
ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
5576 | fltr->l2_key.vlan = key->vlan; |
5577 | fltr->base.type = BNXT_FLTR_TYPE_L2; |
5578 | if (fltr->base.flags) { |
5579 | int bit_id; |
5580 | |
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
bp->max_fltr, 0);
5583 | if (bit_id < 0) |
5584 | return -ENOMEM; |
5585 | fltr->base.sw_id = (u16)bit_id; |
5586 | bp->ntp_fltr_count++; |
5587 | } |
5588 | head = &bp->l2_fltr_hash_tbl[idx]; |
hlist_add_head_rcu(&fltr->base.hash, head);
bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
5593 | return 0; |
5594 | } |
5595 | |
5596 | static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, |
5597 | struct bnxt_l2_key *key, |
5598 | gfp_t gfp) |
5599 | { |
5600 | struct bnxt_l2_filter *fltr; |
5601 | u32 idx; |
5602 | int rc; |
5603 | |
idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
5605 | BNXT_L2_FLTR_HASH_MASK; |
5606 | fltr = bnxt_lookup_l2_filter(bp, key, idx); |
5607 | if (fltr) |
5608 | return fltr; |
5609 | |
fltr = kzalloc(sizeof(*fltr), gfp);
if (!fltr)
return ERR_PTR(-ENOMEM);
spin_lock_bh(&bp->ntp_fltr_lock);
rc = bnxt_init_l2_filter(bp, fltr, key, idx);
spin_unlock_bh(&bp->ntp_fltr_lock);
if (rc) {
bnxt_del_l2_filter(bp, fltr);
fltr = ERR_PTR(rc);
5619 | } |
5620 | return fltr; |
5621 | } |
5622 | |
5623 | struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, |
5624 | struct bnxt_l2_key *key, |
5625 | u16 flags) |
5626 | { |
5627 | struct bnxt_l2_filter *fltr; |
5628 | u32 idx; |
5629 | int rc; |
5630 | |
idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
BNXT_L2_FLTR_HASH_MASK;
spin_lock_bh(&bp->ntp_fltr_lock);
fltr = __bnxt_lookup_l2_filter(bp, key, idx);
if (fltr) {
fltr = ERR_PTR(-EEXIST);
goto l2_filter_exit;
}
fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
if (!fltr) {
fltr = ERR_PTR(-ENOMEM);
goto l2_filter_exit;
}
fltr->base.flags = flags;
rc = bnxt_init_l2_filter(bp, fltr, key, idx);
if (rc) {
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr);
return ERR_PTR(rc);
}

l2_filter_exit:
spin_unlock_bh(&bp->ntp_fltr_lock);
5654 | return fltr; |
5655 | } |
5656 | |
5657 | static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) |
5658 | { |
5659 | #ifdef CONFIG_BNXT_SRIOV |
5660 | struct bnxt_vf_info *vf = &pf->vf[vf_idx]; |
5661 | |
5662 | return vf->fw_fid; |
5663 | #else |
5664 | return INVALID_HW_RING_ID; |
5665 | #endif |
5666 | } |
5667 | |
5668 | int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) |
5669 | { |
5670 | struct hwrm_cfa_l2_filter_free_input *req; |
5671 | u16 target_id = 0xffff; |
5672 | int rc; |
5673 | |
5674 | if (fltr->base.flags & BNXT_ACT_FUNC_DST) { |
5675 | struct bnxt_pf_info *pf = &bp->pf; |
5676 | |
5677 | if (fltr->base.vf_idx >= pf->active_vfs) |
5678 | return -EINVAL; |
5679 | |
target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5681 | if (target_id == INVALID_HW_RING_ID) |
5682 | return -EINVAL; |
5683 | } |
5684 | |
5685 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); |
5686 | if (rc) |
5687 | return rc; |
5688 | |
5689 | req->target_id = cpu_to_le16(target_id); |
5690 | req->l2_filter_id = fltr->base.filter_id; |
5691 | return hwrm_req_send(bp, req); |
5692 | } |
5693 | |
5694 | int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) |
5695 | { |
5696 | struct hwrm_cfa_l2_filter_alloc_output *resp; |
5697 | struct hwrm_cfa_l2_filter_alloc_input *req; |
5698 | u16 target_id = 0xffff; |
5699 | int rc; |
5700 | |
5701 | if (fltr->base.flags & BNXT_ACT_FUNC_DST) { |
5702 | struct bnxt_pf_info *pf = &bp->pf; |
5703 | |
5704 | if (fltr->base.vf_idx >= pf->active_vfs) |
5705 | return -EINVAL; |
5706 | |
target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
5708 | } |
5709 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); |
5710 | if (rc) |
5711 | return rc; |
5712 | |
5713 | req->target_id = cpu_to_le16(target_id); |
5714 | req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); |
5715 | |
5716 | if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) |
5717 | req->flags |= |
5718 | cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); |
5719 | req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); |
5720 | req->enables = |
5721 | cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | |
5722 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | |
5723 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); |
ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
eth_broadcast_addr(req->l2_addr_mask);
5726 | |
5727 | if (fltr->l2_key.vlan) { |
5728 | req->enables |= |
5729 | cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | |
5730 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | |
5731 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); |
5732 | req->num_vlans = 1; |
5733 | req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); |
5734 | req->l2_ivlan_mask = cpu_to_le16(0xfff); |
5735 | } |
5736 | |
5737 | resp = hwrm_req_hold(bp, req); |
5738 | rc = hwrm_req_send(bp, req); |
5739 | if (!rc) { |
5740 | fltr->base.filter_id = resp->l2_filter_id; |
set_bit(BNXT_FLTR_VALID, &fltr->base.state);
5742 | } |
5743 | hwrm_req_drop(bp, req); |
5744 | return rc; |
5745 | } |
5746 | |
5747 | int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, |
5748 | struct bnxt_ntuple_filter *fltr) |
5749 | { |
5750 | struct hwrm_cfa_ntuple_filter_free_input *req; |
5751 | int rc; |
5752 | |
set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
5754 | rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); |
5755 | if (rc) |
5756 | return rc; |
5757 | |
5758 | req->ntuple_filter_id = fltr->base.filter_id; |
5759 | return hwrm_req_send(bp, req); |
5760 | } |
5761 | |
5762 | #define BNXT_NTP_FLTR_FLAGS \ |
5763 | (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ |
5764 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ |
5765 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ |
5766 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ |
5767 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ |
5768 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ |
5769 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ |
5770 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ |
5771 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ |
5772 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ |
5773 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ |
5774 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ |
5775 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) |
5776 | |
5777 | #define BNXT_NTP_TUNNEL_FLTR_FLAG \ |
5778 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
5779 | |
5780 | void bnxt_fill_ipv6_mask(__be32 mask[4]) |
5781 | { |
5782 | int i; |
5783 | |
5784 | for (i = 0; i < 4; i++) |
5785 | mask[i] = cpu_to_be32(~0); |
5786 | } |
5787 | |
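/* Two ways to steer an RFS flow to an rx queue: devices with a
* dedicated ntuple VNIC take an index into that VNIC's RSS ring
* table, while older devices take the ring id directly as the
* filter destination.
*/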
5788 | static void |
5789 | bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, |
5790 | struct hwrm_cfa_ntuple_filter_alloc_input *req, |
5791 | u16 rxq) |
5792 | { |
5793 | if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { |
5794 | struct bnxt_vnic_info *vnic; |
5795 | u32 enables; |
5796 | |
5797 | vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; |
5798 | req->dst_id = cpu_to_le16(vnic->fw_vnic_id); |
5799 | enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; |
5800 | req->enables |= cpu_to_le32(enables); |
5801 | req->rfs_ring_tbl_idx = cpu_to_le16(rxq); |
5802 | } else { |
5803 | u32 flags; |
5804 | |
5805 | flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; |
5806 | req->flags |= cpu_to_le32(flags); |
5807 | req->dst_id = cpu_to_le16(rxq); |
5808 | } |
5809 | } |
5810 | |
5811 | int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, |
5812 | struct bnxt_ntuple_filter *fltr) |
5813 | { |
5814 | struct hwrm_cfa_ntuple_filter_alloc_output *resp; |
5815 | struct hwrm_cfa_ntuple_filter_alloc_input *req; |
5816 | struct bnxt_flow_masks *masks = &fltr->fmasks; |
5817 | struct flow_keys *keys = &fltr->fkeys; |
5818 | struct bnxt_l2_filter *l2_fltr; |
5819 | struct bnxt_vnic_info *vnic; |
5820 | int rc; |
5821 | |
5822 | rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); |
5823 | if (rc) |
5824 | return rc; |
5825 | |
5826 | l2_fltr = fltr->l2_fltr; |
5827 | req->l2_filter_id = l2_fltr->base.filter_id; |
5828 | |
5829 | if (fltr->base.flags & BNXT_ACT_DROP) { |
5830 | req->flags = |
5831 | cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); |
5832 | } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { |
bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
5834 | } else { |
5835 | vnic = &bp->vnic_info[fltr->base.rxq + 1]; |
5836 | req->dst_id = cpu_to_le16(vnic->fw_vnic_id); |
5837 | } |
5838 | req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); |
5839 | |
5840 | req->ethertype = htons(ETH_P_IP); |
5841 | req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; |
5842 | req->ip_protocol = keys->basic.ip_proto; |
5843 | |
5844 | if (keys->basic.n_proto == htons(ETH_P_IPV6)) { |
5845 | req->ethertype = htons(ETH_P_IPV6); |
5846 | req->ip_addr_type = |
5847 | CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; |
5848 | *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; |
5849 | *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; |
5850 | *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; |
5851 | *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; |
5852 | } else { |
5853 | req->src_ipaddr[0] = keys->addrs.v4addrs.src; |
5854 | req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; |
5855 | req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; |
5856 | req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; |
5857 | } |
5858 | if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { |
5859 | req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); |
5860 | req->tunnel_type = |
5861 | CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; |
5862 | } |
5863 | |
5864 | req->src_port = keys->ports.src; |
5865 | req->src_port_mask = masks->ports.src; |
5866 | req->dst_port = keys->ports.dst; |
5867 | req->dst_port_mask = masks->ports.dst; |
5868 | |
5869 | resp = hwrm_req_hold(bp, req); |
5870 | rc = hwrm_req_send(bp, req); |
5871 | if (!rc) |
5872 | fltr->base.filter_id = resp->ntuple_filter_id; |
5873 | hwrm_req_drop(bp, req); |
5874 | return rc; |
5875 | } |
5876 | |
5877 | static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, |
5878 | const u8 *mac_addr) |
5879 | { |
5880 | struct bnxt_l2_filter *fltr; |
5881 | struct bnxt_l2_key key; |
5882 | int rc; |
5883 | |
ether_addr_copy(key.dst_mac_addr, mac_addr);
key.vlan = 0;
fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
if (IS_ERR(fltr))
return PTR_ERR(fltr);
5889 | |
5890 | fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; |
5891 | rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); |
5892 | if (rc) |
5893 | bnxt_del_l2_filter(bp, fltr); |
5894 | else |
5895 | bp->vnic_info[vnic_id].l2_filters[idx] = fltr; |
5896 | return rc; |
5897 | } |
5898 | |
5899 | static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) |
5900 | { |
5901 | u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ |
5902 | |
5903 | /* Any associated ntuple filters will also be cleared by firmware. */ |
5904 | for (i = 0; i < num_of_vnics; i++) { |
5905 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
5906 | |
5907 | for (j = 0; j < vnic->uc_filter_count; j++) { |
5908 | struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; |
5909 | |
5910 | bnxt_hwrm_l2_filter_free(bp, fltr); |
5911 | bnxt_del_l2_filter(bp, fltr); |
5912 | } |
5913 | vnic->uc_filter_count = 0; |
5914 | } |
5915 | } |
5916 | |
5917 | #define BNXT_DFLT_TUNL_TPA_BMAP \ |
5918 | (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ |
5919 | VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ |
5920 | VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) |
5921 | |
5922 | static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, |
5923 | struct hwrm_vnic_tpa_cfg_input *req) |
5924 | { |
5925 | u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; |
5926 | |
5927 | if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) |
5928 | return; |
5929 | |
5930 | if (bp->vxlan_port) |
5931 | tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; |
5932 | if (bp->vxlan_gpe_port) |
5933 | tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; |
5934 | if (bp->nge_port) |
5935 | tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; |
5936 | |
5937 | req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); |
5938 | req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); |
5939 | } |
5940 | |
5941 | static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) |
5942 | { |
5943 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5944 | u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; |
5945 | struct hwrm_vnic_tpa_cfg_input *req; |
5946 | int rc; |
5947 | |
5948 | if (vnic->fw_vnic_id == INVALID_HW_RING_ID) |
5949 | return 0; |
5950 | |
5951 | rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); |
5952 | if (rc) |
5953 | return rc; |
5954 | |
5955 | if (tpa_flags) { |
5956 | u16 mss = bp->dev->mtu - 40; |
5957 | u32 nsegs, n, segs = 0, flags; |
5958 | |
5959 | flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | |
5960 | VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | |
5961 | VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | |
5962 | VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | |
5963 | VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; |
5964 | if (tpa_flags & BNXT_FLAG_GRO) |
5965 | flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; |
5966 | |
5967 | req->flags = cpu_to_le32(flags); |
5968 | |
5969 | req->enables = |
5970 | cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | |
5971 | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | |
5972 | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); |
5973 | |
/* The number of aggregation segments is in log2 units, and the
* first packet is not counted in these units.
*/
5977 | if (mss <= BNXT_RX_PAGE_SIZE) { |
5978 | n = BNXT_RX_PAGE_SIZE / mss; |
5979 | nsegs = (MAX_SKB_FRAGS - 1) * n; |
5980 | } else { |
5981 | n = mss / BNXT_RX_PAGE_SIZE; |
5982 | if (mss & (BNXT_RX_PAGE_SIZE - 1)) |
5983 | n++; |
5984 | nsegs = (MAX_SKB_FRAGS - n) / n; |
5985 | } |
5986 | |
5987 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
5988 | segs = MAX_TPA_SEGS_P5; |
5989 | max_aggs = bp->max_tpa; |
5990 | } else { |
5991 | segs = ilog2(nsegs); |
5992 | } |
5993 | req->max_agg_segs = cpu_to_le16(segs); |
5994 | req->max_aggs = cpu_to_le16(max_aggs); |
5995 | |
5996 | req->min_agg_len = cpu_to_le32(512); |
5997 | bnxt_hwrm_vnic_update_tunl_tpa(bp, req); |
5998 | } |
5999 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
6000 | |
6001 | return hwrm_req_send(bp, req); |
6002 | } |
6003 | |
6004 | static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) |
6005 | { |
6006 | struct bnxt_ring_grp_info *grp_info; |
6007 | |
6008 | grp_info = &bp->grp_info[ring->grp_idx]; |
6009 | return grp_info->cp_fw_ring_id; |
6010 | } |
6011 | |
6012 | static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
6013 | { |
6014 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
6015 | return rxr->rx_cpr->cp_ring_struct.fw_ring_id; |
6016 | else |
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
6018 | } |
6019 | |
6020 | static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) |
6021 | { |
6022 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
6023 | return txr->tx_cpr->cp_ring_struct.fw_ring_id; |
6024 | else |
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
6026 | } |
6027 | |
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
6029 | { |
6030 | int entries; |
6031 | |
6032 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
6033 | entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; |
6034 | else |
6035 | entries = HW_HASH_INDEX_SIZE; |
6036 | |
6037 | bp->rss_indir_tbl_entries = entries; |
bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
GFP_KERNEL);
6040 | if (!bp->rss_indir_tbl) |
6041 | return -ENOMEM; |
6042 | return 0; |
6043 | } |
6044 | |
static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
6046 | { |
6047 | u16 max_rings, max_entries, pad, i; |
6048 | |
6049 | if (!bp->rx_nr_rings) |
6050 | return; |
6051 | |
6052 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
6053 | max_rings = bp->rx_nr_rings - 1; |
6054 | else |
6055 | max_rings = bp->rx_nr_rings; |
6056 | |
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
6058 | |
6059 | for (i = 0; i < max_entries; i++) |
bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
6061 | |
6062 | pad = bp->rss_indir_tbl_entries - max_entries; |
6063 | if (pad) |
6064 | memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); |
6065 | } |
6066 | |
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
6068 | { |
6069 | u16 i, tbl_size, max_ring = 0; |
6070 | |
6071 | if (!bp->rss_indir_tbl) |
6072 | return 0; |
6073 | |
tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6075 | for (i = 0; i < tbl_size; i++) |
6076 | max_ring = max(max_ring, bp->rss_indir_tbl[i]); |
6077 | return max_ring; |
6078 | } |
6079 | |
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
6081 | { |
6082 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
6083 | if (!rx_rings) |
6084 | return 0; |
return bnxt_calc_nr_ring_pages(rx_rings - 1,
BNXT_RSS_TABLE_ENTRIES_P5);
6087 | } |
6088 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
6089 | return 2; |
6090 | return 1; |
6091 | } |
6092 | |
static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
6096 | u16 i, j; |
6097 | |
6098 | /* Fill the RSS indirection table with ring group ids */ |
6099 | for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { |
6100 | if (!no_rss) |
6101 | j = bp->rss_indir_tbl[i]; |
6102 | vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); |
6103 | } |
6104 | } |
6105 | |
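/* On P5+ chips each RSS table slot holds a (rx ring id, completion
* ring id) pair of FW ring ids, rather than a ring group id as on
* older chips.
*/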
static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
6108 | { |
6109 | __le16 *ring_tbl = vnic->rss_table; |
6110 | struct bnxt_rx_ring_info *rxr; |
6111 | u16 tbl_size, i; |
6112 | |
tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
6114 | |
6115 | for (i = 0; i < tbl_size; i++) { |
6116 | u16 ring_id, j; |
6117 | |
6118 | if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) |
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
6120 | else |
6121 | j = bp->rss_indir_tbl[i]; |
6122 | rxr = &bp->rx_ring[j]; |
6123 | |
6124 | ring_id = rxr->rx_ring_struct.fw_ring_id; |
6125 | *ring_tbl++ = cpu_to_le16(ring_id); |
6126 | ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
6127 | *ring_tbl++ = cpu_to_le16(ring_id); |
6128 | } |
6129 | } |
6130 | |
static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
6134 | { |
6135 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
6136 | bnxt_fill_hw_rss_tbl_p5(bp, vnic); |
6137 | if (bp->flags & BNXT_FLAG_CHIP_P7) |
6138 | req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; |
6139 | } else { |
6140 | bnxt_fill_hw_rss_tbl(bp, vnic); |
6141 | } |
6142 | |
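/* rss_hash_delta carries only the hash types that changed; tell the
* FW whether to add them to or remove them from the current set.
*/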
6143 | if (bp->rss_hash_delta) { |
6144 | req->hash_type = cpu_to_le32(bp->rss_hash_delta); |
6145 | if (bp->rss_hash_cfg & bp->rss_hash_delta) |
6146 | req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; |
6147 | else |
6148 | req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; |
6149 | } else { |
6150 | req->hash_type = cpu_to_le32(bp->rss_hash_cfg); |
6151 | } |
6152 | req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; |
6153 | req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); |
6154 | req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); |
6155 | } |
6156 | |
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
6158 | { |
6159 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
6160 | struct hwrm_vnic_rss_cfg_input *req; |
6161 | int rc; |
6162 | |
6163 | if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || |
6164 | vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) |
6165 | return 0; |
6166 | |
6167 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); |
6168 | if (rc) |
6169 | return rc; |
6170 | |
6171 | if (set_rss) |
6172 | __bnxt_hwrm_vnic_set_rss(bp, req, vnic); |
6173 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
6174 | return hwrm_req_send(bp, req); |
6175 | } |
6176 | |
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
6178 | { |
6179 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
6180 | struct hwrm_vnic_rss_cfg_input *req; |
6181 | dma_addr_t ring_tbl_map; |
6182 | u32 i, nr_ctxs; |
6183 | int rc; |
6184 | |
6185 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); |
6186 | if (rc) |
6187 | return rc; |
6188 | |
6189 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
6190 | if (!set_rss) |
6191 | return hwrm_req_send(bp, req); |
6192 | |
6193 | __bnxt_hwrm_vnic_set_rss(bp, req, vnic); |
6194 | ring_tbl_map = vnic->rss_table_dma_addr; |
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
6196 | |
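/* Each send programs one ring table segment of the RSS context; the
* DMA address advances by BNXT_RSS_TABLE_SIZE_P5 per context.
*/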
6197 | hwrm_req_hold(bp, req); |
6198 | for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { |
6199 | req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); |
6200 | req->ring_table_pair_index = i; |
6201 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); |
6202 | rc = hwrm_req_send(bp, req); |
6203 | if (rc) |
6204 | goto exit; |
6205 | } |
6206 | |
6207 | exit: |
6208 | hwrm_req_drop(bp, req); |
6209 | return rc; |
6210 | } |
6211 | |
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
6213 | { |
6214 | struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
6215 | struct hwrm_vnic_rss_qcfg_output *resp; |
6216 | struct hwrm_vnic_rss_qcfg_input *req; |
6217 | |
6218 | if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) |
6219 | return; |
6220 | |
6221 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
/* All contexts are configured to the same hash_type; context zero always exists. */
6223 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
6224 | resp = hwrm_req_hold(bp, req); |
6225 | if (!hwrm_req_send(bp, req)) { |
6226 | bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; |
6227 | bp->rss_hash_delta = 0; |
6228 | } |
6229 | hwrm_req_drop(bp, req); |
6230 | } |
6231 | |
6232 | static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) |
6233 | { |
6234 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
6235 | struct hwrm_vnic_plcmodes_cfg_input *req; |
6236 | int rc; |
6237 | |
6238 | rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); |
6239 | if (rc) |
6240 | return rc; |
6241 | |
6242 | req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); |
6243 | req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); |
6244 | |
6245 | if (BNXT_RX_PAGE_MODE(bp)) { |
6246 | req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); |
6247 | } else { |
6248 | req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | |
6249 | VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); |
6250 | req->enables |= |
6251 | cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); |
6252 | req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); |
6253 | req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); |
6254 | } |
6255 | req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); |
6256 | return hwrm_req_send(bp, req); |
6257 | } |
6258 | |
6259 | static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, |
6260 | u16 ctx_idx) |
6261 | { |
6262 | struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; |
6263 | |
6264 | if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) |
6265 | return; |
6266 | |
6267 | req->rss_cos_lb_ctx_id = |
6268 | cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); |
6269 | |
6270 | hwrm_req_send(bp, req); |
6271 | bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; |
6272 | } |
6273 | |
6274 | static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) |
6275 | { |
6276 | int i, j; |
6277 | |
6278 | for (i = 0; i < bp->nr_vnics; i++) { |
6279 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
6280 | |
6281 | for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { |
6282 | if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) |
bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
6284 | } |
6285 | } |
6286 | bp->rsscos_nr_ctxs = 0; |
6287 | } |
6288 | |
6289 | static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) |
6290 | { |
6291 | struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; |
6292 | struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; |
6293 | int rc; |
6294 | |
6295 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); |
6296 | if (rc) |
6297 | return rc; |
6298 | |
6299 | resp = hwrm_req_hold(bp, req); |
6300 | rc = hwrm_req_send(bp, req); |
6301 | if (!rc) |
6302 | bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = |
6303 | le16_to_cpu(resp->rss_cos_lb_ctx_id); |
6304 | hwrm_req_drop(bp, req); |
6305 | |
6306 | return rc; |
6307 | } |
6308 | |
6309 | static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) |
6310 | { |
6311 | if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) |
6312 | return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; |
6313 | return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; |
6314 | } |
6315 | |
6316 | int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) |
6317 | { |
6318 | struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
6319 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
6320 | struct hwrm_vnic_cfg_input *req; |
6321 | unsigned int ring = 0, grp_idx; |
6322 | u16 def_vlan = 0; |
6323 | int rc; |
6324 | |
6325 | rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); |
6326 | if (rc) |
6327 | return rc; |
6328 | |
6329 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
6330 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; |
6331 | |
6332 | req->default_rx_ring_id = |
6333 | cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); |
6334 | req->default_cmpl_ring_id = |
6335 | cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); |
6336 | req->enables = |
6337 | cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | |
6338 | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); |
6339 | goto vnic_mru; |
6340 | } |
6341 | req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); |
/* Only RSS is supported for now; COS & LB are TBD. */
6343 | if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { |
6344 | req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
6345 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | |
6346 | VNIC_CFG_REQ_ENABLES_MRU); |
6347 | } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { |
6348 | req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); |
6349 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | |
6350 | VNIC_CFG_REQ_ENABLES_MRU); |
6351 | req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); |
6352 | } else { |
6353 | req->rss_rule = cpu_to_le16(0xffff); |
6354 | } |
6355 | |
6356 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && |
6357 | (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { |
6358 | req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); |
6359 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); |
6360 | } else { |
6361 | req->cos_rule = cpu_to_le16(0xffff); |
6362 | } |
6363 | |
6364 | if (vnic->flags & BNXT_VNIC_RSS_FLAG) |
6365 | ring = 0; |
6366 | else if (vnic->flags & BNXT_VNIC_RFS_FLAG) |
6367 | ring = vnic_id - 1; |
6368 | else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) |
6369 | ring = bp->rx_nr_rings - 1; |
6370 | |
6371 | grp_idx = bp->rx_ring[ring].bnapi->index; |
6372 | req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); |
6373 | req->lb_rule = cpu_to_le16(0xffff); |
6374 | vnic_mru: |
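/* MRU covers the MTU plus the Ethernet header and one VLAN tag. */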
6375 | req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); |
6376 | |
6377 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
6378 | #ifdef CONFIG_BNXT_SRIOV |
6379 | if (BNXT_VF(bp)) |
6380 | def_vlan = bp->vf.vlan; |
6381 | #endif |
6382 | if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) |
6383 | req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); |
if (!vnic_id && bnxt_ulp_registered(bp->edev))
6385 | req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); |
6386 | |
6387 | return hwrm_req_send(bp, req); |
6388 | } |
6389 | |
6390 | static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) |
6391 | { |
6392 | if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { |
6393 | struct hwrm_vnic_free_input *req; |
6394 | |
6395 | if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) |
6396 | return; |
6397 | |
6398 | req->vnic_id = |
6399 | cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); |
6400 | |
6401 | hwrm_req_send(bp, req); |
6402 | bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; |
6403 | } |
6404 | } |
6405 | |
6406 | static void bnxt_hwrm_vnic_free(struct bnxt *bp) |
6407 | { |
6408 | u16 i; |
6409 | |
6410 | for (i = 0; i < bp->nr_vnics; i++) |
bnxt_hwrm_vnic_free_one(bp, i);
6412 | } |
6413 | |
6414 | static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, |
6415 | unsigned int start_rx_ring_idx, |
6416 | unsigned int nr_rings) |
6417 | { |
6418 | unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; |
6419 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
6420 | struct hwrm_vnic_alloc_output *resp; |
6421 | struct hwrm_vnic_alloc_input *req; |
6422 | int rc; |
6423 | |
6424 | rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); |
6425 | if (rc) |
6426 | return rc; |
6427 | |
6428 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
6429 | goto vnic_no_ring_grps; |
6430 | |
6431 | /* map ring groups to this vnic */ |
6432 | for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { |
6433 | grp_idx = bp->rx_ring[i].bnapi->index; |
6434 | if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { |
6435 | netdev_err(dev: bp->dev, format: "Not enough ring groups avail:%x req:%x\n" , |
6436 | j, nr_rings); |
6437 | break; |
6438 | } |
6439 | vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; |
6440 | } |
6441 | |
6442 | vnic_no_ring_grps: |
6443 | for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) |
6444 | vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; |
6445 | if (vnic_id == BNXT_VNIC_DEFAULT) |
6446 | req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); |
6447 | |
6448 | resp = hwrm_req_hold(bp, req); |
6449 | rc = hwrm_req_send(bp, req); |
6450 | if (!rc) |
6451 | vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); |
6452 | hwrm_req_drop(bp, req); |
6453 | return rc; |
6454 | } |
6455 | |
6456 | static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) |
6457 | { |
6458 | struct hwrm_vnic_qcaps_output *resp; |
6459 | struct hwrm_vnic_qcaps_input *req; |
6460 | int rc; |
6461 | |
6462 | bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); |
6463 | bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; |
6464 | bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; |
6465 | if (bp->hwrm_spec_code < 0x10600) |
6466 | return 0; |
6467 | |
6468 | rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); |
6469 | if (rc) |
6470 | return rc; |
6471 | |
6472 | resp = hwrm_req_hold(bp, req); |
6473 | rc = hwrm_req_send(bp, req); |
6474 | if (!rc) { |
6475 | u32 flags = le32_to_cpu(resp->flags); |
6476 | |
6477 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && |
6478 | (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) |
6479 | bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; |
6480 | if (flags & |
6481 | VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) |
6482 | bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; |
6483 | |
6484 | /* Older P5 fw before EXT_HW_STATS support did not set |
6485 | * VLAN_STRIP_CAP properly. |
6486 | */ |
6487 | if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || |
6488 | (BNXT_CHIP_P5(bp) && |
6489 | !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) |
6490 | bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; |
6491 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) |
6492 | bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; |
6493 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) |
6494 | bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; |
6495 | bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); |
6496 | if (bp->max_tpa_v2) { |
6497 | if (BNXT_CHIP_P5(bp)) |
6498 | bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; |
6499 | else |
6500 | bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; |
6501 | } |
6502 | if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) |
6503 | bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; |
6504 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) |
6505 | bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; |
6506 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) |
6507 | bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; |
6508 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) |
6509 | bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; |
6510 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) |
6511 | bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; |
6512 | } |
6513 | hwrm_req_drop(bp, req); |
6514 | return rc; |
6515 | } |
6516 | |
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	struct hwrm_ring_grp_alloc_output *resp;
	struct hwrm_ring_grp_alloc_input *req;
	int rc;
	u16 i;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = hwrm_req_send(bp, req);

		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	struct hwrm_ring_grp_free_input *req;
	u16 i;

	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return;

	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
		return;

	hwrm_req_hold(bp, req);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req->ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		hwrm_req_send(bp, req);
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	hwrm_req_drop(bp, req);
}

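/* Send a single HWRM_RING_ALLOC request.  The ring type selects which
 * fields of the request are meaningful (TX, RX, AGG, CMPL or NQ); on
 * success the firmware-assigned ID is stored in ring->fw_ring_id.
 */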
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index)
{
	struct hwrm_ring_alloc_output *resp;
	struct hwrm_ring_alloc_input *req;
	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
	struct bnxt_ring_grp_info *grp_info;
	int rc, err = 0;
	u16 ring_id;

	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
	if (rc)
		goto exit;

	req->enables = 0;
	if (rmem->nr_pages > 1) {
		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
		/* Page size is in log2 units */
		req->page_size = BNXT_PAGE_SHIFT;
		req->page_tbl_depth = 1;
	} else {
		req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	req->fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req->logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnxt_tx_ring_info *txr;

		txr = container_of(ring, struct bnxt_tx_ring_info,
				   tx_ring_struct);
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bp->grp_info[ring->grp_idx];
		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req->queue_id = cpu_to_le16(ring->queue_id);
		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
			req->cmpl_coal_cnt =
				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			u16 flags = 0;

			/* Association of rx ring with stats context */
			grp_info = &bp->grp_info[ring->grp_idx];
			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req->enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
			if (NET_IP_ALIGN == 2)
				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
			req->flags = cpu_to_le16(flags);
		}
		break;
	case HWRM_RING_ALLOC_AGG:
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
			/* Association of agg ring with rx ring */
			grp_info = &bp->grp_info[ring->grp_idx];
			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req->enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		} else {
			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		}
		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			/* Association of cp ring with nq */
			grp_info = &bp->grp_info[map_index];
			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
			req->cq_handle = cpu_to_le64(ring->handle);
			req->enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		}
		break;
	case HWRM_RING_ALLOC_NQ:
		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	hwrm_req_drop(bp, req);

exit:
	if (rc || err) {
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input *req;

		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
		if (rc)
			return rc;

		req->fid = cpu_to_le16(0xffff);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req->async_event_cr = cpu_to_le16(idx);
		return hwrm_req_send(bp, req);
	} else {
		struct hwrm_func_vf_cfg_input *req;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
		if (rc)
			return rc;

		req->enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req->async_event_cr = cpu_to_le16(idx);
		return hwrm_req_send(bp, req);
	}
}

static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
			     u32 ring_type)
{
	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		db->db_ring_mask = bp->tx_ring_mask;
		break;
	case HWRM_RING_ALLOC_RX:
		db->db_ring_mask = bp->rx_ring_mask;
		break;
	case HWRM_RING_ALLOC_AGG:
		db->db_ring_mask = bp->rx_agg_ring_mask;
		break;
	case HWRM_RING_ALLOC_CMPL:
	case HWRM_RING_ALLOC_NQ:
		db->db_ring_mask = bp->cp_ring_mask;
		break;
	}
	if (bp->flags & BNXT_FLAG_CHIP_P7) {
		db->db_epoch_mask = db->db_ring_mask + 1;
		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
	}
}

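/* Compute the doorbell address and key for a newly allocated ring.
 * P5+ chips use 64-bit doorbells at a common offset with the ring XID
 * encoded in the key (plus a valid bit on P7); older chips use 32-bit
 * doorbells at a fixed 0x80 stride per MSI-X index.
 */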
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
			u32 map_idx, u32 xid)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key64 = DBR_PATH_L2;
			break;
		case HWRM_RING_ALLOC_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (u64)xid << DBR_XID_SFT;

		if (bp->flags & BNXT_FLAG_CHIP_P7)
			db->db_key64 |= DBR_VALID;

		db->doorbell = bp->bar1 + bp->db_offset;
	} else {
		db->doorbell = bp->bar1 + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
	bnxt_set_db_mask(bp, db, ring_type);
}

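/* Allocate all rings with firmware in dependency order: NQs (or legacy
 * completion rings) first, then TX rings with their per-ring completion
 * rings on P5+, then RX rings, then aggregation rings.  The IRQ of each
 * NQ is masked while the ring is allocated and its doorbell is first
 * written.
 */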
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
	int i, rc = 0;
	u32 type;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		type = HWRM_RING_ALLOC_NQ;
	else
		type = HWRM_RING_ALLOC_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
		u32 map_idx = ring->map_idx;
		unsigned int vector;

		vector = bp->irq_tbl[map_idx].vector;
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc) {
			enable_irq(vector);
			goto err_out;
		}
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		enable_irq(vector);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	type = HWRM_RING_ALLOC_TX;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u32 map_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
			struct bnxt_napi *bnapi = txr->bnapi;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_SET_NQ_HDL(cpr2);
			map_idx = bnapi->index;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
		ring = &txr->tx_ring_struct;
		map_idx = i;
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
	}

	type = HWRM_RING_ALLOC_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		struct bnxt_napi *bnapi = rxr->bnapi;
		u32 map_idx = bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
		/* If we have agg rings, post agg buffers first. */
		if (!agg_rings)
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_SET_NQ_HDL(cpr2);
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
	}

	if (agg_rings) {
		type = HWRM_RING_ALLOC_AGG;
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = ring->grp_idx;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
			if (rc)
				goto err_out;

			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	struct hwrm_ring_free_output *resp;
	struct hwrm_ring_free_input *req;
	u16 error_code = 0;
	int rc;

	if (BNXT_NO_FW_ACCESS(bp))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
	if (rc)
		goto exit;

	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
	req->ring_type = ring_type;
	req->ring_id = cpu_to_le16(ring->fw_ring_id);

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	error_code = le16_to_cpu(resp->error_code);
	hwrm_req_drop(bp, req);
exit:
	if (rc || error_code) {
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
		return -EIO;
	}
	return 0;
}

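/* Free all firmware rings in roughly the reverse order of allocation:
 * TX first, then RX and aggregation rings, and the completion/NQ rings
 * last, with interrupts disabled once the completion rings are about
 * to go away.
 */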
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	u32 type;
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
	else
		type = RING_FREE_REQ_RING_TYPE_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed. After that the
	 * IRQ doorbell will not work anymore. So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		type = RING_FREE_REQ_RING_TYPE_NQ;
	else
		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring;
		int j;

		for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
			struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];

			ring = &cpr2->cp_ring_struct;
			if (ring->fw_ring_id == INVALID_HW_RING_ID)
				continue;
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_L2_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
		ring = &cpr->cp_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			     bool shared);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);

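/* Query the resources currently reserved for this function via
 * HWRM_FUNC_QCFG and record them in bp->hw_resc.  On P5+ chips the
 * reserved RX/TX ring counts may be trimmed to fit within the reserved
 * completion rings.
 */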
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		hw_resc->resv_irqs = cp;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			int rx = hw_resc->resv_rx_rings;
			int tx = hw_resc->resv_tx_rings;

			if (bp->flags & BNXT_FLAG_AGG_RINGS)
				rx >>= 1;
			if (cp < (rx + tx)) {
				rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
				if (rc)
					goto get_rings_exit;
				if (bp->flags & BNXT_FLAG_AGG_RINGS)
					rx <<= 1;
				hw_resc->resv_rx_rings = rx;
				hw_resc->resv_tx_rings = tx;
			}
			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
			hw_resc->resv_hw_ring_grps = rx;
		}
		hw_resc->resv_cp_rings = cp;
		hw_resc->resv_stat_ctxs = stats;
	}
get_rings_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	hwrm_req_drop(bp, req);
	return rc;
}

static bool bnxt_rfs_supported(struct bnxt *bp);

static struct hwrm_func_cfg_input *
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
		return NULL;

	req->fid = cpu_to_le16(0xffff);
	enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(hwr->tx);
	if (BNXT_NEW_RM(bp)) {
		enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
			enables |= hwr->cp_p5 ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
		} else {
			enables |= hwr->cp ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
			enables |= hwr->grp ?
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
		}
		enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
		enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
					  0;
		req->num_rx_rings = cpu_to_le16(hwr->rx);
		req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
			req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
			req->num_msix = cpu_to_le16(hwr->cp);
		} else {
			req->num_cmpl_rings = cpu_to_le16(hwr->cp);
			req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
		}
		req->num_stat_ctxs = cpu_to_le16(hwr->stat);
		req->num_vnics = cpu_to_le16(hwr->vnic);
	}
	req->enables = cpu_to_le32(enables);
	return req;
}

static struct hwrm_func_vf_cfg_input *
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_vf_cfg_input *req;
	u32 enables = 0;

	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
		return NULL;

	enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
			     FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		enables |= hwr->cp_p5 ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
	} else {
		enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
		enables |= hwr->grp ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	}
	enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;

	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->num_tx_rings = cpu_to_le16(hwr->tx);
	req->num_rx_rings = cpu_to_le16(hwr->rx);
	req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
	} else {
		req->num_cmpl_rings = cpu_to_le16(hwr->cp);
		req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
	}
	req->num_stat_ctxs = cpu_to_le16(hwr->stat);
	req->num_vnics = cpu_to_le16(hwr->vnic);

	req->enables = cpu_to_le32(enables);
	return req;
}

static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;

	if (!req->enables) {
		hwrm_req_drop(bp, req);
		return 0;
	}

	rc = hwrm_req_send(bp, req);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = hwr->tx;

	return bnxt_hwrm_get_rings(bp);
}

static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_vf_cfg_input *req;
	int rc;

	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = hwr->tx;
		return 0;
	}

	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
	if (!req)
		return -ENOMEM;

	rc = hwrm_req_send(bp, req);
	if (rc)
		return rc;

	return bnxt_hwrm_get_rings(bp);
}

static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, hwr);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}

int bnxt_nq_rings_in_use(struct bnxt *bp)
{
	int cp = bp->cp_nr_rings;
	int ulp_msix, ulp_base;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (ulp_msix) {
		ulp_base = bnxt_get_ulp_msix_base(bp);
		cp += ulp_msix;
		if ((ulp_base + ulp_msix) > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
	int cp;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return bnxt_nq_rings_in_use(bp);

	cp = bp->tx_nr_rings + bp->rx_nr_rings;
	return cp;
}

static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
	int cp = bp->cp_nr_rings;

	if (!ulp_stat)
		return cp;

	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
		return bnxt_get_ulp_msix_base(bp) + ulp_stat;

	return cp + ulp_stat;
}

static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	if (!hwr->grp)
		return 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);

		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
			rss_ctx *= hwr->vnic;
		return rss_ctx;
	}
	if (BNXT_VF(bp))
		return BNXT_VF_MAX_RSS_CTX;
	if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
		return hwr->grp + 1;
	return 1;
}

/* Check if a default RSS map needs to be setup. This function is only
 * used on older firmware that does not require reserving RX rings.
 */
static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	/* The RSS map is valid for RX rings set to resv_rx_rings */
	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
		hw_resc->resv_rx_rings = bp->rx_nr_rings;
		if (!netif_is_rxfh_configured(bp->dev))
			bnxt_set_dflt_rss_indir_tbl(bp);
	}
}

static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
{
	if (bp->flags & BNXT_FLAG_RFS) {
		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
			return 2;
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			return rx_rings + 1;
	}
	return 1;
}

static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int nq = bnxt_nq_rings_in_use(bp);
	int rx = bp->rx_nr_rings, stat;
	int vnic, grp = rx;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
	    bp->hwrm_spec_code >= 0x10601)
		return true;

	/* Old firmware does not need RX ring reservations but we still
	 * need to setup a default RSS map when needed. With new firmware
	 * we go through RX ring reservations first and then set up the
	 * RSS map for the successfully reserved RX rings when needed.
	 */
	if (!BNXT_NEW_RM(bp)) {
		bnxt_check_rss_tbl_no_rmgr(bp);
		return false;
	}

	vnic = bnxt_get_total_vnics(bp, rx);

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	stat = bnxt_get_func_stat_ctxs(bp);
	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
	    (hw_resc->resv_hw_ring_grps != grp &&
	     !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
		return true;
	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
	    hw_resc->resv_irqs != nq)
		return true;
	return false;
}

static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	hwr->tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		hwr->rx = hw_resc->resv_rx_rings;
		hwr->cp = hw_resc->resv_irqs;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			hwr->cp_p5 = hw_resc->resv_cp_rings;
		hwr->grp = hw_resc->resv_hw_ring_grps;
		hwr->vnic = hw_resc->resv_vnics;
		hwr->stat = hw_resc->resv_stat_ctxs;
		hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
	}
}

static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
	       hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
}

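/* Reserve rings and the resources that scale with them (VNICs, ring
 * groups, stats and RSS contexts) with firmware, then adjust the
 * driver's ring counts to what was actually granted.  A user-configured
 * RSS indirection table is only reset if the granted RX rings can no
 * longer support it.
 */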
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_rings hwr = {0};
	int rx_rings, rc;
	bool sh = false;
	int tx_cp;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	hwr.cp = bnxt_nq_rings_in_use(bp);
	hwr.tx = bp->tx_nr_rings;
	hwr.rx = bp->rx_nr_rings;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		hwr.cp_p5 = hwr.rx + hwr.tx;

	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		hwr.rx <<= 1;
	hwr.grp = bp->rx_nr_rings;
	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
	hwr.stat = bnxt_get_func_stat_ctxs(bp);

	rc = bnxt_hwrm_reserve_rings(bp, &hwr);
	if (rc)
		return rc;

	bnxt_copy_reserved_rings(bp, &hwr);

	rx_rings = hwr.rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (hwr.rx >= 2) {
			rx_rings = hwr.rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, hwr.grp);
	hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
	if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
		hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
	hwr.cp = min_t(int, hwr.cp, hwr.stat);
	rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		hwr.rx = rx_rings << 1;
	tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
	hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
	bp->tx_nr_rings = hwr.tx;

	/* If we cannot reserve all the RX rings, reset the RSS map only
	 * if absolutely necessary
	 */
	if (rx_rings != bp->rx_nr_rings) {
		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
			    rx_rings, bp->rx_nr_rings);
		if (netif_is_rxfh_configured(bp->dev) &&
		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		}
	}
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = hwr.cp;

	if (!bnxt_rings_ok(bp, &hwr))
		return -ENOMEM;

	if (!netif_is_rxfh_configured(bp->dev))
		bnxt_set_dflt_rss_indir_tbl(bp);

	return rc;
}

static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_vf_cfg_input *req;
	u32 flags;

	if (!BNXT_NEW_RM(bp))
		return 0;

	req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req->flags = cpu_to_le32(flags);
	return hwrm_req_send_silent(bp, req);
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	struct hwrm_func_cfg_input *req;
	u32 flags;

	req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req->flags = cpu_to_le32(flags);
	return hwrm_req_send_silent(bp, req);
}

static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, hwr);

	return bnxt_hwrm_check_vf_rings(bp, hwr);
}

static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_output *resp;
	struct hwrm_ring_aggint_qcaps_input *req;
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	hwrm_req_drop(bp, req);
}

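/* Convert microseconds to coalescing timer ticks.  timer_units is the
 * tick period reported by RING_AGGINT_QCAPS (80 by default, apparently
 * in nanoseconds given the conversion below), so e.g. 100 usec maps to
 * 100 * 1000 / 80 = 1250 ticks.
 */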
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
				      struct bnxt_coal *hw_coal,
				      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u16 val, tmr, max, flags = hw_coal->flags;
	u32 cmpl_params = coal_cap->cmpl_params;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;
	int rc;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req->flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req->int_lat_tmr_min = cpu_to_le16(tmr);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return hwrm_req_send(bp, req);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;
	int rc;

	/* Tick values in micro seconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);

	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_req_send(bp, req_rx);
}

static int
bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);

	req->ring_id = cpu_to_le16(ring_id);
	return hwrm_req_send(bp, req);
}

static int
bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
		      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_tx_ring_info *txr;
	int i, rc;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		u16 ring_id;

		ring_id = bnxt_cp_ring_for_tx(bp, txr);
		req->ring_id = cpu_to_le16(ring_id);
		rc = hwrm_req_send(bp, req);
		if (rc)
			return rc;
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			return 0;
	}
	return 0;
}

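/* Program interrupt coalescing on every ring.  One request is built
 * for RX and one for TX; each is held and re-sent with a different
 * ring ID per ring.  On P5+ chips the NQ coalescing parameters are
 * also programmed per NAPI instance.
 */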
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
	int i, rc;

	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc) {
		hwrm_req_drop(bp, req_rx);
		return rc;
	}

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);

	hwrm_req_hold(bp, req_rx);
	hwrm_req_hold(bp, req_tx);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;

		if (!bnapi->rx_ring)
			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
		else
			rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring[0]) {
			rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	hwrm_req_drop(bp, req_rx);
	hwrm_req_drop(bp, req_tx);
	return rc;
}

static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
	struct hwrm_stat_ctx_free_input *req;
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
		return;
	if (BNXT_FW_MAJ(bp) <= 20) {
		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
			hwrm_req_drop(bp, req);
			return;
		}
		hwrm_req_hold(bp, req0);
	}
	hwrm_req_hold(bp, req);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (req0) {
				req0->stat_ctx_id = req->stat_ctx_id;
				hwrm_req_send(bp, req0);
			}
			hwrm_req_send(bp, req);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	hwrm_req_drop(bp, req);
	if (req0)
		hwrm_req_drop(bp, req0);
}

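/* Allocate one firmware stats context per completion ring, pointing
 * each at that ring's stats DMA buffer.  The update period is derived
 * from bp->stats_coal_ticks (assumed to be in usec, hence the divide
 * by 1000 to get ms).
 */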
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	struct hwrm_stat_ctx_alloc_output *resp;
	struct hwrm_stat_ctx_alloc_input *req;
	int rc, i;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
	if (rc)
		return rc;

	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	resp = hwrm_req_hold(bp, req);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = hwrm_req_send(bp, req);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	u16 flags;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
	if (BNXT_CHIP_P5(bp)) {
		if (BNXT_PF(bp))
			bp->db_offset = DB_PF_OFFSET_P5;
		else
			bp->db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= bp->db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
				      u8 init_val, u8 init_offset,
				      bool init_mask_set)
{
	ctxm->init_value = init_val;
	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
	if (init_mask_set)
		ctxm->init_offset = init_offset * 4;
	else
		ctxm->init_value = 0;
}

static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	u16 type;

	for (type = 0; type < ctx_max; type++) {
		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		int n = 1;

		if (!ctxm->max_entries)
			continue;

		if (ctxm->instance_bmap)
			n = hweight32(ctxm->instance_bmap);
		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
		if (!ctxm->pg_info)
			return -ENOMEM;
	}
	return 0;
}

#define BNXT_CTX_INIT_VALID(flags)	\
	(!!((flags) &			\
	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))

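/* V2 backing store discovery: issue one HWRM_FUNC_BACKING_STORE_QCAPS_V2
 * request per context type, following resp->next_valid_type until all
 * types have been walked, and record each type's geometry in
 * ctx->ctx_arr[].
 */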
static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_v2_output *resp;
	struct hwrm_func_backing_store_qcaps_v2_input *req;
	struct bnxt_ctx_mem_info *ctx;
	u16 type;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
	if (rc)
		return rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	bp->ctx = ctx;

	resp = hwrm_req_hold(bp, req);

	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		u8 init_val, init_off, i;
		__le32 *p;
		u32 flags;

		req->type = cpu_to_le16(type);
		rc = hwrm_req_send(bp, req);
		if (rc)
			goto ctx_done;
		flags = le32_to_cpu(resp->flags);
		type = le16_to_cpu(resp->next_valid_type);
		if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
			continue;

		ctxm->type = le16_to_cpu(resp->type);
		ctxm->entry_size = le16_to_cpu(resp->entry_size);
		ctxm->flags = flags;
		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
		ctxm->entry_multiple = resp->entry_multiple;
		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
		init_val = resp->ctx_init_value;
		init_off = resp->ctx_init_offset;
		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
					  BNXT_CTX_INIT_VALID(flags));
		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
					      BNXT_MAX_SPLIT_ENTRY);
		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
		     i++, p++)
			ctxm->split[i] = le32_to_cpu(*p);
	}
	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);

ctx_done:
	hwrm_req_drop(bp, req);
	return rc;
}

8060 | static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) |
8061 | { |
8062 | struct hwrm_func_backing_store_qcaps_output *resp; |
8063 | struct hwrm_func_backing_store_qcaps_input *req; |
8064 | int rc; |
8065 | |
8066 | if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) |
8067 | return 0; |
8068 | |
8069 | if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) |
8070 | return bnxt_hwrm_func_backing_store_qcaps_v2(bp); |
8071 | |
8072 | rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); |
8073 | if (rc) |
8074 | return rc; |
8075 | |
8076 | resp = hwrm_req_hold(bp, req); |
8077 | rc = hwrm_req_send_silent(bp, req); |
8078 | if (!rc) { |
8079 | struct bnxt_ctx_mem_type *ctxm; |
8080 | struct bnxt_ctx_mem_info *ctx; |
8081 | u8 init_val, init_idx = 0; |
8082 | u16 init_mask; |
8083 | |
8084 | ctx = bp->ctx; |
8085 | if (!ctx) { |
8086 | ctx = kzalloc(size: sizeof(*ctx), GFP_KERNEL); |
8087 | if (!ctx) { |
8088 | rc = -ENOMEM; |
8089 | goto ctx_err; |
8090 | } |
8091 | bp->ctx = ctx; |
8092 | } |
8093 | init_val = resp->ctx_kind_initializer; |
8094 | init_mask = le16_to_cpu(resp->ctx_init_mask); |
8095 | |
8096 | ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; |
8097 | ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); |
8098 | ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); |
8099 | ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); |
8100 | ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); |
8101 | ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); |
8102 | bnxt_init_ctx_initializer(ctxm, init_val, init_offset: resp->qp_init_offset, |
8103 | init_mask_set: (init_mask & (1 << init_idx++)) != 0); |
8104 | |
8105 | ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; |
8106 | ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); |
8107 | ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); |
8108 | ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); |
8109 | bnxt_init_ctx_initializer(ctxm, init_val, init_offset: resp->srq_init_offset, |
8110 | init_mask_set: (init_mask & (1 << init_idx++)) != 0); |
8111 | |
8112 | ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; |
8113 | ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); |
8114 | ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); |
8115 | ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); |
8116 | bnxt_init_ctx_initializer(ctxm, init_val, init_offset: resp->cq_init_offset, |
8117 | init_mask_set: (init_mask & (1 << init_idx++)) != 0); |
8118 | |
8119 | ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; |
8120 | ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); |
8121 | ctxm->max_entries = ctxm->vnic_entries + |
8122 | le16_to_cpu(resp->vnic_max_ring_table_entries); |
8123 | ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); |
bnxt_init_ctx_initializer(ctxm, init_val,
resp->vnic_init_offset,
(init_mask & (1 << init_idx++)) != 0);
8127 | |
8128 | ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; |
8129 | ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); |
8130 | ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); |
bnxt_init_ctx_initializer(ctxm, init_val,
resp->stat_init_offset,
(init_mask & (1 << init_idx++)) != 0);
8134 | |
8135 | ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; |
8136 | ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); |
8137 | ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); |
8138 | ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); |
8139 | ctxm->entry_multiple = resp->tqm_entries_multiple; |
8140 | if (!ctxm->entry_multiple) |
8141 | ctxm->entry_multiple = 1; |
8142 | |
8143 | memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); |
8144 | |
8145 | ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; |
8146 | ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); |
8147 | ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); |
8148 | ctxm->mrav_num_entries_units = |
8149 | le16_to_cpu(resp->mrav_num_entries_units); |
bnxt_init_ctx_initializer(ctxm, init_val,
resp->mrav_init_offset,
(init_mask & (1 << init_idx++)) != 0);
8153 | |
8154 | ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; |
8155 | ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); |
8156 | ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); |
8157 | |
8158 | ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; |
8159 | if (!ctx->tqm_fp_rings_count) |
8160 | ctx->tqm_fp_rings_count = bp->max_q; |
8161 | else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) |
8162 | ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; |
8163 | |
8164 | ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; |
8165 | memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); |
8166 | ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; |
8167 | |
8168 | rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); |
8169 | } else { |
8170 | rc = 0; |
8171 | } |
8172 | ctx_err: |
8173 | hwrm_req_drop(bp, req); |
8174 | return rc; |
8175 | } |
8176 | |
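/* Encode the page size and indirection depth of @rmem into the request's
 * page attribute byte and point @pg_dir at the page table, or directly at
 * the single data page when no indirection is used.
 */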
8177 | static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, |
8178 | __le64 *pg_dir) |
8179 | { |
8180 | if (!rmem->nr_pages) |
8181 | return; |
8182 | |
8183 | BNXT_SET_CTX_PAGE_ATTR(*pg_attr); |
8184 | if (rmem->depth >= 1) { |
8185 | if (rmem->depth == 2) |
8186 | *pg_attr |= 2; |
8187 | else |
8188 | *pg_attr |= 1; |
8189 | *pg_dir = cpu_to_le64(rmem->pg_tbl_map); |
8190 | } else { |
8191 | *pg_dir = cpu_to_le64(rmem->dma_arr[0]); |
8192 | } |
8193 | } |
8194 | |
8195 | #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ |
8196 | (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ |
8197 | FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ |
8198 | FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ |
8199 | FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ |
8200 | FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) |
8201 | |
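/* Configure the legacy backing store: for every context type selected in
 * @enables, fill in the entry counts, entry size and page directory, then
 * send a single HWRM_FUNC_BACKING_STORE_CFG request to the firmware.
 */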
8202 | static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) |
8203 | { |
8204 | struct hwrm_func_backing_store_cfg_input *req; |
8205 | struct bnxt_ctx_mem_info *ctx = bp->ctx; |
8206 | struct bnxt_ctx_pg_info *ctx_pg; |
8207 | struct bnxt_ctx_mem_type *ctxm; |
8208 | void **__req = (void **)&req; |
8209 | u32 req_len = sizeof(*req); |
8210 | __le32 *num_entries; |
8211 | __le64 *pg_dir; |
8212 | u32 flags = 0; |
8213 | u8 *pg_attr; |
8214 | u32 ena; |
8215 | int rc; |
8216 | int i; |
8217 | |
8218 | if (!ctx) |
8219 | return 0; |
8220 | |
8221 | if (req_len > bp->hwrm_max_ext_req_len) |
8222 | req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; |
rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
8224 | if (rc) |
8225 | return rc; |
8226 | |
8227 | req->enables = cpu_to_le32(enables); |
8228 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { |
8229 | ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; |
8230 | ctx_pg = ctxm->pg_info; |
8231 | req->qp_num_entries = cpu_to_le32(ctx_pg->entries); |
8232 | req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); |
8233 | req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); |
8234 | req->qp_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->qpc_pg_size_qpc_lvl,
&req->qpc_page_dir);
8238 | |
8239 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) |
8240 | req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); |
8241 | } |
8242 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { |
8243 | ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; |
8244 | ctx_pg = ctxm->pg_info; |
8245 | req->srq_num_entries = cpu_to_le32(ctx_pg->entries); |
8246 | req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); |
8247 | req->srq_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->srq_pg_size_srq_lvl,
&req->srq_page_dir);
8251 | } |
8252 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { |
8253 | ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; |
8254 | ctx_pg = ctxm->pg_info; |
8255 | req->cq_num_entries = cpu_to_le32(ctx_pg->entries); |
8256 | req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); |
8257 | req->cq_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->cq_pg_size_cq_lvl,
&req->cq_page_dir);
8261 | } |
8262 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { |
8263 | ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; |
8264 | ctx_pg = ctxm->pg_info; |
8265 | req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); |
8266 | req->vnic_num_ring_table_entries = |
8267 | cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); |
8268 | req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->vnic_pg_size_vnic_lvl,
&req->vnic_page_dir);
8272 | } |
8273 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { |
8274 | ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; |
8275 | ctx_pg = ctxm->pg_info; |
8276 | req->stat_num_entries = cpu_to_le32(ctxm->max_entries); |
8277 | req->stat_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->stat_pg_size_stat_lvl,
&req->stat_page_dir);
8281 | } |
8282 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { |
8283 | u32 units; |
8284 | |
8285 | ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; |
8286 | ctx_pg = ctxm->pg_info; |
8287 | req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); |
8288 | units = ctxm->mrav_num_entries_units; |
8289 | if (units) { |
8290 | u32 num_mr, num_ah = ctxm->mrav_av_entries; |
8291 | u32 entries; |
8292 | |
8293 | num_mr = ctx_pg->entries - num_ah; |
8294 | entries = ((num_mr / units) << 16) | (num_ah / units); |
8295 | req->mrav_num_entries = cpu_to_le32(entries); |
8296 | flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; |
8297 | } |
8298 | req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->mrav_pg_size_mrav_lvl,
&req->mrav_page_dir);
8302 | } |
8303 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { |
8304 | ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; |
8305 | ctx_pg = ctxm->pg_info; |
8306 | req->tim_num_entries = cpu_to_le32(ctx_pg->entries); |
8307 | req->tim_entry_size = cpu_to_le16(ctxm->entry_size); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->tim_pg_size_tim_lvl,
&req->tim_page_dir);
8311 | } |
8312 | ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; |
8313 | for (i = 0, num_entries = &req->tqm_sp_num_entries, |
8314 | pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, |
8315 | pg_dir = &req->tqm_sp_page_dir, |
8316 | ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, |
8317 | ctx_pg = ctxm->pg_info; |
8318 | i < BNXT_MAX_TQM_RINGS; |
8319 | ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], |
8320 | i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { |
8321 | if (!(enables & ena)) |
8322 | continue; |
8323 | |
8324 | req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); |
8325 | *num_entries = cpu_to_le32(ctx_pg->entries); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
8327 | } |
8328 | req->flags = cpu_to_le32(flags); |
8329 | return hwrm_req_send(bp, req); |
8330 | } |
8331 | |
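/* Allocate one block of context memory described by @ctx_pg through the
 * generic ring allocator, marking the PTEs valid and using full pages for
 * indirect levels.
 */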
8332 | static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, |
8333 | struct bnxt_ctx_pg_info *ctx_pg) |
8334 | { |
8335 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
8336 | |
8337 | rmem->page_size = BNXT_PAGE_SIZE; |
8338 | rmem->pg_arr = ctx_pg->ctx_pg_arr; |
8339 | rmem->dma_arr = ctx_pg->ctx_dma_arr; |
8340 | rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; |
8341 | if (rmem->depth >= 1) |
8342 | rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; |
8343 | return bnxt_alloc_ring(bp, rmem); |
8344 | } |
8345 | |
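/* Allocate @mem_size bytes of context memory in BNXT_PAGE_SIZE pages,
 * using up to two levels of page tables when the region exceeds
 * MAX_CTX_PAGES or the caller asks for deeper indirection.
 */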
8346 | static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, |
8347 | struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, |
8348 | u8 depth, struct bnxt_ctx_mem_type *ctxm) |
8349 | { |
8350 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
8351 | int rc; |
8352 | |
8353 | if (!mem_size) |
8354 | return -EINVAL; |
8355 | |
8356 | ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); |
8357 | if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { |
8358 | ctx_pg->nr_pages = 0; |
8359 | return -EINVAL; |
8360 | } |
8361 | if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { |
8362 | int nr_tbls, i; |
8363 | |
8364 | rmem->depth = 2; |
ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
8366 | GFP_KERNEL); |
8367 | if (!ctx_pg->ctx_pg_tbl) |
8368 | return -ENOMEM; |
8369 | nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); |
8370 | rmem->nr_pages = nr_tbls; |
8371 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); |
8372 | if (rc) |
8373 | return rc; |
8374 | for (i = 0; i < nr_tbls; i++) { |
8375 | struct bnxt_ctx_pg_info *pg_tbl; |
8376 | |
pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
8378 | if (!pg_tbl) |
8379 | return -ENOMEM; |
8380 | ctx_pg->ctx_pg_tbl[i] = pg_tbl; |
8381 | rmem = &pg_tbl->ring_mem; |
8382 | rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; |
8383 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; |
8384 | rmem->depth = 1; |
8385 | rmem->nr_pages = MAX_CTX_PAGES; |
8386 | rmem->ctx_mem = ctxm; |
8387 | if (i == (nr_tbls - 1)) { |
8388 | int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; |
8389 | |
8390 | if (rem) |
8391 | rmem->nr_pages = rem; |
8392 | } |
rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
8394 | if (rc) |
8395 | break; |
8396 | } |
8397 | } else { |
8398 | rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); |
8399 | if (rmem->nr_pages > 1 || depth) |
8400 | rmem->depth = 1; |
8401 | rmem->ctx_mem = ctxm; |
8402 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); |
8403 | } |
8404 | return rc; |
8405 | } |
8406 | |
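/* Free the context memory pages and any page table levels allocated by
 * bnxt_alloc_ctx_pg_tbls().
 */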
8407 | static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, |
8408 | struct bnxt_ctx_pg_info *ctx_pg) |
8409 | { |
8410 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
8411 | |
8412 | if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || |
8413 | ctx_pg->ctx_pg_tbl) { |
8414 | int i, nr_tbls = rmem->nr_pages; |
8415 | |
8416 | for (i = 0; i < nr_tbls; i++) { |
8417 | struct bnxt_ctx_pg_info *pg_tbl; |
8418 | struct bnxt_ring_mem_info *rmem2; |
8419 | |
8420 | pg_tbl = ctx_pg->ctx_pg_tbl[i]; |
8421 | if (!pg_tbl) |
8422 | continue; |
8423 | rmem2 = &pg_tbl->ring_mem; |
bnxt_free_ring(bp, rmem2);
8425 | ctx_pg->ctx_pg_arr[i] = NULL; |
kfree(pg_tbl);
8427 | ctx_pg->ctx_pg_tbl[i] = NULL; |
8428 | } |
kfree(ctx_pg->ctx_pg_tbl);
8430 | ctx_pg->ctx_pg_tbl = NULL; |
8431 | } |
8432 | bnxt_free_ring(bp, rmem); |
8433 | ctx_pg->nr_pages = 0; |
8434 | } |
8435 | |
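/* Size and allocate backing store pages for context type @ctxm: round
 * @entries up to the required multiple, clamp it to the firmware-reported
 * range, and allocate one region per instance.
 */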
8436 | static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, |
8437 | struct bnxt_ctx_mem_type *ctxm, u32 entries, |
8438 | u8 pg_lvl) |
8439 | { |
8440 | struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; |
8441 | int i, rc = 0, n = 1; |
8442 | u32 mem_size; |
8443 | |
8444 | if (!ctxm->entry_size || !ctx_pg) |
8445 | return -EINVAL; |
8446 | if (ctxm->instance_bmap) |
8447 | n = hweight32(ctxm->instance_bmap); |
8448 | if (ctxm->entry_multiple) |
8449 | entries = roundup(entries, ctxm->entry_multiple); |
8450 | entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); |
8451 | mem_size = entries * ctxm->entry_size; |
8452 | for (i = 0; i < n && !rc; i++) { |
8453 | ctx_pg[i].entries = entries; |
rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
8456 | } |
8457 | return rc; |
8458 | } |
8459 | |
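/* Send one HWRM_FUNC_BACKING_STORE_CFG_V2 request per instance of context
 * type @ctxm.  When @last is set, the final request carries the
 * BS_CFG_ALL_DONE flag to tell the firmware that configuration is
 * complete.
 */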
8460 | static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, |
8461 | struct bnxt_ctx_mem_type *ctxm, |
8462 | bool last) |
8463 | { |
8464 | struct hwrm_func_backing_store_cfg_v2_input *req; |
8465 | u32 instance_bmap = ctxm->instance_bmap; |
8466 | int i, j, rc = 0, n = 1; |
8467 | __le32 *p; |
8468 | |
8469 | if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) |
8470 | return 0; |
8471 | |
8472 | if (instance_bmap) |
8473 | n = hweight32(ctxm->instance_bmap); |
8474 | else |
8475 | instance_bmap = 1; |
8476 | |
8477 | rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); |
8478 | if (rc) |
8479 | return rc; |
8480 | hwrm_req_hold(bp, req); |
8481 | req->type = cpu_to_le16(ctxm->type); |
8482 | req->entry_size = cpu_to_le16(ctxm->entry_size); |
8483 | req->subtype_valid_cnt = ctxm->split_entry_cnt; |
8484 | for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) |
8485 | p[i] = cpu_to_le32(ctxm->split[i]); |
8486 | for (i = 0, j = 0; j < n && !rc; i++) { |
8487 | struct bnxt_ctx_pg_info *ctx_pg; |
8488 | |
8489 | if (!(instance_bmap & (1 << i))) |
8490 | continue; |
8491 | req->instance = cpu_to_le16(i); |
8492 | ctx_pg = &ctxm->pg_info[j++]; |
8493 | if (!ctx_pg->entries) |
8494 | continue; |
8495 | req->num_entries = cpu_to_le32(ctx_pg->entries); |
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->page_size_pbl_level,
&req->page_dir);
8499 | if (last && j == n) |
8500 | req->flags = |
8501 | cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); |
8502 | rc = hwrm_req_send(bp, req); |
8503 | } |
8504 | hwrm_req_drop(bp, req); |
8505 | return rc; |
8506 | } |
8507 | |
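/* Configure the backing store for every V2 context type, marking the last
 * applicable type so the final request signals completion to the
 * firmware.
 */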
8508 | static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) |
8509 | { |
8510 | struct bnxt_ctx_mem_info *ctx = bp->ctx; |
8511 | struct bnxt_ctx_mem_type *ctxm; |
8512 | u16 last_type; |
8513 | int rc = 0; |
8514 | u16 type; |
8515 | |
8516 | if (!ena) |
8517 | return 0; |
8518 | else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) |
8519 | last_type = BNXT_CTX_MAX - 1; |
8520 | else |
8521 | last_type = BNXT_CTX_L2_MAX - 1; |
8522 | ctx->ctx_arr[last_type].last = 1; |
8523 | |
8524 | for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { |
8525 | ctxm = &ctx->ctx_arr[type]; |
8526 | |
rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
8528 | if (rc) |
8529 | return rc; |
8530 | } |
8531 | return 0; |
8532 | } |
8533 | |
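/* Release all context memory: free each type's pages and page tables,
 * then the page info arrays and the bnxt_ctx_mem_info structure itself.
 */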
8534 | void bnxt_free_ctx_mem(struct bnxt *bp) |
8535 | { |
8536 | struct bnxt_ctx_mem_info *ctx = bp->ctx; |
8537 | u16 type; |
8538 | |
8539 | if (!ctx) |
8540 | return; |
8541 | |
8542 | for (type = 0; type < BNXT_CTX_V2_MAX; type++) { |
8543 | struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; |
8544 | struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; |
8545 | int i, n = 1; |
8546 | |
8547 | if (!ctx_pg) |
8548 | continue; |
8549 | if (ctxm->instance_bmap) |
8550 | n = hweight32(ctxm->instance_bmap); |
8551 | for (i = 0; i < n; i++) |
bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
8553 | |
kfree(ctx_pg);
8555 | ctxm->pg_info = NULL; |
8556 | } |
8557 | |
8558 | ctx->flags &= ~BNXT_CTX_FLAG_INITED; |
kfree(ctx);
8560 | bp->ctx = NULL; |
8561 | } |
8562 | |
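/* Top-level context memory setup: query the backing store capabilities,
 * compute entry counts for the QP, SRQ, CQ, VNIC, stats and TQM types
 * (plus MRAV and TIM when RoCE is supported), allocate the pages and hand
 * the layout to the firmware.
 */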
8563 | static int bnxt_alloc_ctx_mem(struct bnxt *bp) |
8564 | { |
8565 | struct bnxt_ctx_mem_type *ctxm; |
8566 | struct bnxt_ctx_mem_info *ctx; |
8567 | u32 l2_qps, qp1_qps, max_qps; |
8568 | u32 ena, entries_sp, entries; |
8569 | u32 srqs, max_srqs, min; |
8570 | u32 num_mr, num_ah; |
u32 extra_srqs = 0;
u32 extra_qps = 0;
8573 | u32 fast_qpmd_qps; |
8574 | u8 pg_lvl = 1; |
8575 | int i, rc; |
8576 | |
8577 | rc = bnxt_hwrm_func_backing_store_qcaps(bp); |
8578 | if (rc) { |
8579 | netdev_err(dev: bp->dev, format: "Failed querying context mem capability, rc = %d.\n" , |
8580 | rc); |
8581 | return rc; |
8582 | } |
8583 | ctx = bp->ctx; |
8584 | if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) |
8585 | return 0; |
8586 | |
8587 | ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; |
8588 | l2_qps = ctxm->qp_l2_entries; |
8589 | qp1_qps = ctxm->qp_qp1_entries; |
8590 | fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; |
8591 | max_qps = ctxm->max_entries; |
8592 | ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; |
8593 | srqs = ctxm->srq_l2_entries; |
8594 | max_srqs = ctxm->max_entries; |
8595 | ena = 0; |
8596 | if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { |
8597 | pg_lvl = 2; |
8598 | extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); |
8599 | /* allocate extra qps if fw supports RoCE fast qp destroy feature */ |
8600 | extra_qps += fast_qpmd_qps; |
8601 | extra_srqs = min_t(u32, 8192, max_srqs - srqs); |
8602 | if (fast_qpmd_qps) |
8603 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; |
8604 | } |
8605 | |
8606 | ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
8608 | pg_lvl); |
8609 | if (rc) |
8610 | return rc; |
8611 | |
8612 | ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
8614 | if (rc) |
8615 | return rc; |
8616 | |
8617 | ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
8619 | extra_qps * 2, pg_lvl); |
8620 | if (rc) |
8621 | return rc; |
8622 | |
8623 | ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8625 | if (rc) |
8626 | return rc; |
8627 | |
8628 | ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
8630 | if (rc) |
8631 | return rc; |
8632 | |
8633 | if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) |
8634 | goto skip_rdma; |
8635 | |
8636 | ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; |
8637 | /* 128K extra is needed to accommodate static AH context |
8638 | * allocation by f/w. |
8639 | */ |
8640 | num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); |
8641 | num_ah = min_t(u32, num_mr, 1024 * 128); |
8642 | ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; |
8643 | if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) |
8644 | ctxm->mrav_av_entries = num_ah; |
8645 | |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
8647 | if (rc) |
8648 | return rc; |
8649 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; |
8650 | |
8651 | ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
8653 | if (rc) |
8654 | return rc; |
8655 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; |
8656 | |
8657 | skip_rdma: |
8658 | ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; |
8659 | min = ctxm->min_entries; |
8660 | entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + |
8661 | 2 * (extra_qps + qp1_qps) + min; |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
8663 | if (rc) |
8664 | return rc; |
8665 | |
8666 | ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; |
8667 | entries = l2_qps + 2 * (extra_qps + qp1_qps); |
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
8669 | if (rc) |
8670 | return rc; |
8671 | for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) |
8672 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; |
8673 | ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; |
8674 | |
8675 | if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) |
8676 | rc = bnxt_backing_store_cfg_v2(bp, ena); |
8677 | else |
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
8679 | if (rc) { |
8680 | netdev_err(dev: bp->dev, format: "Failed configuring context mem, rc = %d.\n" , |
8681 | rc); |
8682 | return rc; |
8683 | } |
8684 | ctx->flags |= BNXT_CTX_FLAG_INITED; |
8685 | return 0; |
8686 | } |
8687 | |
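/* Query min/max resource counts (rings, VNICs, stat contexts, etc.) from
 * the firmware.  When @all is false, only the TX scheduler input limit is
 * refreshed.
 */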
8688 | int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) |
8689 | { |
8690 | struct hwrm_func_resource_qcaps_output *resp; |
8691 | struct hwrm_func_resource_qcaps_input *req; |
8692 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
8693 | int rc; |
8694 | |
8695 | rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); |
8696 | if (rc) |
8697 | return rc; |
8698 | |
8699 | req->fid = cpu_to_le16(0xffff); |
8700 | resp = hwrm_req_hold(bp, req); |
8701 | rc = hwrm_req_send_silent(bp, req); |
8702 | if (rc) |
8703 | goto hwrm_func_resc_qcaps_exit; |
8704 | |
8705 | hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); |
8706 | if (!all) |
8707 | goto hwrm_func_resc_qcaps_exit; |
8708 | |
8709 | hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); |
8710 | hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
8711 | hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); |
8712 | hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
8713 | hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); |
8714 | hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); |
8715 | hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); |
8716 | hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); |
8717 | hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); |
8718 | hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); |
8719 | hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); |
8720 | hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); |
8721 | hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); |
8722 | hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); |
8723 | hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); |
8724 | hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); |
8725 | |
8726 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
8727 | u16 max_msix = le16_to_cpu(resp->max_msix); |
8728 | |
8729 | hw_resc->max_nqs = max_msix; |
8730 | hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; |
8731 | } |
8732 | |
8733 | if (BNXT_PF(bp)) { |
8734 | struct bnxt_pf_info *pf = &bp->pf; |
8735 | |
8736 | pf->vf_resv_strategy = |
8737 | le16_to_cpu(resp->vf_reservation_strategy); |
8738 | if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) |
8739 | pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; |
8740 | } |
8741 | hwrm_func_resc_qcaps_exit: |
8742 | hwrm_req_drop(bp, req); |
8743 | return rc; |
8744 | } |
8745 | |
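/* Query the port's PTP configuration and, if the timestamping registers
 * are accessible over HWRM, allocate bp->ptp_cfg and initialize PTP
 * support.
 */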
8746 | static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) |
8747 | { |
8748 | struct hwrm_port_mac_ptp_qcfg_output *resp; |
8749 | struct hwrm_port_mac_ptp_qcfg_input *req; |
8750 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
8751 | bool phc_cfg; |
8752 | u8 flags; |
8753 | int rc; |
8754 | |
8755 | if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { |
8756 | rc = -ENODEV; |
8757 | goto no_ptp; |
8758 | } |
8759 | |
8760 | rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); |
8761 | if (rc) |
8762 | goto no_ptp; |
8763 | |
8764 | req->port_id = cpu_to_le16(bp->pf.port_id); |
8765 | resp = hwrm_req_hold(bp, req); |
8766 | rc = hwrm_req_send(bp, req); |
8767 | if (rc) |
8768 | goto exit; |
8769 | |
8770 | flags = resp->flags; |
8771 | if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { |
8772 | rc = -ENODEV; |
8773 | goto exit; |
8774 | } |
8775 | if (!ptp) { |
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
8777 | if (!ptp) { |
8778 | rc = -ENOMEM; |
8779 | goto exit; |
8780 | } |
8781 | ptp->bp = bp; |
8782 | bp->ptp_cfg = ptp; |
8783 | } |
8784 | if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { |
8785 | ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); |
8786 | ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); |
8787 | } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
8788 | ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; |
8789 | ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; |
8790 | } else { |
8791 | rc = -ENODEV; |
8792 | goto exit; |
8793 | } |
8794 | phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; |
8795 | rc = bnxt_ptp_init(bp, phc_cfg); |
8796 | if (rc) |
8797 | netdev_warn(dev: bp->dev, format: "PTP initialization failed.\n" ); |
8798 | exit: |
8799 | hwrm_req_drop(bp, req); |
8800 | if (!rc) |
8801 | return 0; |
8802 | |
8803 | no_ptp: |
8804 | bnxt_ptp_clear(bp); |
kfree(ptp);
8806 | bp->ptp_cfg = NULL; |
8807 | return rc; |
8808 | } |
8809 | |
8810 | static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) |
8811 | { |
8812 | struct hwrm_func_qcaps_output *resp; |
8813 | struct hwrm_func_qcaps_input *req; |
8814 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
8815 | u32 flags, flags_ext, flags_ext2; |
8816 | int rc; |
8817 | |
8818 | rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); |
8819 | if (rc) |
8820 | return rc; |
8821 | |
8822 | req->fid = cpu_to_le16(0xffff); |
8823 | resp = hwrm_req_hold(bp, req); |
8824 | rc = hwrm_req_send(bp, req); |
8825 | if (rc) |
8826 | goto hwrm_func_qcaps_exit; |
8827 | |
8828 | flags = le32_to_cpu(resp->flags); |
8829 | if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) |
8830 | bp->flags |= BNXT_FLAG_ROCEV1_CAP; |
8831 | if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) |
8832 | bp->flags |= BNXT_FLAG_ROCEV2_CAP; |
8833 | if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) |
8834 | bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; |
8835 | if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) |
8836 | bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; |
8837 | if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) |
8838 | bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; |
8839 | if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) |
8840 | bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; |
8841 | if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) |
8842 | bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; |
8843 | if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) |
8844 | bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; |
8845 | if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) |
8846 | bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; |
8847 | |
8848 | flags_ext = le32_to_cpu(resp->flags_ext); |
8849 | if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) |
8850 | bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; |
8851 | if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) |
8852 | bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; |
8853 | if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) |
8854 | bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; |
8855 | if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) |
8856 | bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; |
8857 | if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) |
8858 | bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; |
8859 | if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) |
8860 | bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; |
8861 | if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) |
8862 | bp->flags |= BNXT_FLAG_TX_COAL_CMPL; |
8863 | |
8864 | flags_ext2 = le32_to_cpu(resp->flags_ext2); |
8865 | if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) |
8866 | bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; |
8867 | if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) |
8868 | bp->flags |= BNXT_FLAG_UDP_GSO_CAP; |
8869 | |
8870 | bp->tx_push_thresh = 0; |
8871 | if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && |
8872 | BNXT_FW_MAJ(bp) > 217) |
8873 | bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; |
8874 | |
8875 | hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
8876 | hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
8877 | hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); |
8878 | hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); |
8879 | hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); |
8880 | if (!hw_resc->max_hw_ring_grps) |
8881 | hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; |
8882 | hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); |
8883 | hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); |
8884 | hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); |
8885 | |
8886 | hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); |
8887 | hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); |
8888 | hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); |
8889 | hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); |
8890 | hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); |
8891 | hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); |
8892 | |
8893 | if (BNXT_PF(bp)) { |
8894 | struct bnxt_pf_info *pf = &bp->pf; |
8895 | |
8896 | pf->fw_fid = le16_to_cpu(resp->fid); |
8897 | pf->port_id = le16_to_cpu(resp->port_id); |
8898 | memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); |
8899 | pf->first_vf_id = le16_to_cpu(resp->first_vf_id); |
8900 | pf->max_vfs = le16_to_cpu(resp->max_vfs); |
8901 | bp->flags &= ~BNXT_FLAG_WOL_CAP; |
8902 | if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) |
8903 | bp->flags |= BNXT_FLAG_WOL_CAP; |
8904 | if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { |
8905 | bp->fw_cap |= BNXT_FW_CAP_PTP; |
8906 | } else { |
8907 | bnxt_ptp_clear(bp); |
kfree(bp->ptp_cfg);
8909 | bp->ptp_cfg = NULL; |
8910 | } |
8911 | } else { |
8912 | #ifdef CONFIG_BNXT_SRIOV |
8913 | struct bnxt_vf_info *vf = &bp->vf; |
8914 | |
8915 | vf->fw_fid = le16_to_cpu(resp->fid); |
8916 | memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); |
8917 | #endif |
8918 | } |
8919 | |
8920 | hwrm_func_qcaps_exit: |
8921 | hwrm_req_drop(bp, req); |
8922 | return rc; |
8923 | } |
8924 | |
8925 | static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) |
8926 | { |
8927 | struct hwrm_dbg_qcaps_output *resp; |
8928 | struct hwrm_dbg_qcaps_input *req; |
8929 | int rc; |
8930 | |
8931 | bp->fw_dbg_cap = 0; |
8932 | if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) |
8933 | return; |
8934 | |
8935 | rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); |
8936 | if (rc) |
8937 | return; |
8938 | |
8939 | req->fid = cpu_to_le16(0xffff); |
8940 | resp = hwrm_req_hold(bp, req); |
8941 | rc = hwrm_req_send(bp, req); |
8942 | if (rc) |
8943 | goto hwrm_dbg_qcaps_exit; |
8944 | |
8945 | bp->fw_dbg_cap = le32_to_cpu(resp->flags); |
8946 | |
8947 | hwrm_dbg_qcaps_exit: |
8948 | hwrm_req_drop(bp, req); |
8949 | } |
8950 | |
8951 | static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); |
8952 | |
8953 | int bnxt_hwrm_func_qcaps(struct bnxt *bp) |
8954 | { |
8955 | int rc; |
8956 | |
8957 | rc = __bnxt_hwrm_func_qcaps(bp); |
8958 | if (rc) |
8959 | return rc; |
8960 | |
8961 | bnxt_hwrm_dbg_qcaps(bp); |
8962 | |
8963 | rc = bnxt_hwrm_queue_qportcfg(bp); |
8964 | if (rc) { |
8965 | netdev_err(dev: bp->dev, format: "hwrm query qportcfg failure rc: %d\n" , rc); |
8966 | return rc; |
8967 | } |
8968 | if (bp->hwrm_spec_code >= 0x10803) { |
8969 | rc = bnxt_alloc_ctx_mem(bp); |
8970 | if (rc) |
8971 | return rc; |
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8973 | if (!rc) |
8974 | bp->fw_cap |= BNXT_FW_CAP_NEW_RM; |
8975 | } |
8976 | return 0; |
8977 | } |
8978 | |
8979 | static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) |
8980 | { |
8981 | struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; |
8982 | struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; |
8983 | u32 flags; |
8984 | int rc; |
8985 | |
8986 | if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) |
8987 | return 0; |
8988 | |
8989 | rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); |
8990 | if (rc) |
8991 | return rc; |
8992 | |
8993 | resp = hwrm_req_hold(bp, req); |
8994 | rc = hwrm_req_send(bp, req); |
8995 | if (rc) |
8996 | goto hwrm_cfa_adv_qcaps_exit; |
8997 | |
8998 | flags = le32_to_cpu(resp->flags); |
8999 | if (flags & |
9000 | CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) |
9001 | bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; |
9002 | |
9003 | if (flags & |
9004 | CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) |
9005 | bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; |
9006 | |
9007 | if (flags & |
9008 | CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) |
9009 | bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; |
9010 | |
9011 | hwrm_cfa_adv_qcaps_exit: |
9012 | hwrm_req_drop(bp, req); |
9013 | return rc; |
9014 | } |
9015 | |
9016 | static int __bnxt_alloc_fw_health(struct bnxt *bp) |
9017 | { |
9018 | if (bp->fw_health) |
9019 | return 0; |
9020 | |
bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
9022 | if (!bp->fw_health) |
9023 | return -ENOMEM; |
9024 | |
9025 | mutex_init(&bp->fw_health->lock); |
9026 | return 0; |
9027 | } |
9028 | |
9029 | static int bnxt_alloc_fw_health(struct bnxt *bp) |
9030 | { |
9031 | int rc; |
9032 | |
9033 | if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && |
9034 | !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
9035 | return 0; |
9036 | |
9037 | rc = __bnxt_alloc_fw_health(bp); |
9038 | if (rc) { |
9039 | bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; |
9040 | bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; |
9041 | return rc; |
9042 | } |
9043 | |
9044 | return 0; |
9045 | } |
9046 | |
9047 | static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) |
9048 | { |
writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
9050 | BNXT_GRCPF_REG_WINDOW_BASE_OUT + |
9051 | BNXT_FW_HEALTH_WIN_MAP_OFF); |
9052 | } |
9053 | |
9054 | static void bnxt_inv_fw_health_reg(struct bnxt *bp) |
9055 | { |
9056 | struct bnxt_fw_health *fw_health = bp->fw_health; |
9057 | u32 reg_type; |
9058 | |
9059 | if (!fw_health) |
9060 | return; |
9061 | |
9062 | reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); |
9063 | if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) |
9064 | fw_health->status_reliable = false; |
9065 | |
9066 | reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); |
9067 | if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) |
9068 | fw_health->resets_reliable = false; |
9069 | } |
9070 | |
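/* Try to locate the firmware health status register: use the location
 * advertised in the hcomm status structure when its signature is valid,
 * otherwise fall back to the fixed GRC status register on P5 chips.
 */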
9071 | static void bnxt_try_map_fw_health_reg(struct bnxt *bp) |
9072 | { |
9073 | void __iomem *hs; |
9074 | u32 status_loc; |
9075 | u32 reg_type; |
9076 | u32 sig; |
9077 | |
9078 | if (bp->fw_health) |
9079 | bp->fw_health->status_reliable = false; |
9080 | |
9081 | __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); |
9082 | hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); |
9083 | |
sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
9085 | if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { |
9086 | if (!bp->chip_num) { |
9087 | __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); |
bp->chip_num = readl(bp->bar0 +
9089 | BNXT_FW_HEALTH_WIN_BASE + |
9090 | BNXT_GRC_REG_CHIP_NUM); |
9091 | } |
9092 | if (!BNXT_CHIP_P5(bp)) |
9093 | return; |
9094 | |
9095 | status_loc = BNXT_GRC_REG_STATUS_P5 | |
9096 | BNXT_FW_HEALTH_REG_TYPE_BAR0; |
9097 | } else { |
status_loc = readl(hs + offsetof(struct hcomm_status,
9099 | fw_status_loc)); |
9100 | } |
9101 | |
9102 | if (__bnxt_alloc_fw_health(bp)) { |
9103 | netdev_warn(dev: bp->dev, format: "no memory for firmware status checks\n" ); |
9104 | return; |
9105 | } |
9106 | |
9107 | bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; |
9108 | reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); |
9109 | if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { |
__bnxt_map_fw_health_reg(bp, status_loc);
9111 | bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = |
9112 | BNXT_FW_HEALTH_WIN_OFF(status_loc); |
9113 | } |
9114 | |
9115 | bp->fw_health->status_reliable = true; |
9116 | } |
9117 | |
9118 | static int bnxt_map_fw_health_regs(struct bnxt *bp) |
9119 | { |
9120 | struct bnxt_fw_health *fw_health = bp->fw_health; |
9121 | u32 reg_base = 0xffffffff; |
9122 | int i; |
9123 | |
9124 | bp->fw_health->status_reliable = false; |
9125 | bp->fw_health->resets_reliable = false; |
9126 | /* Only pre-map the monitoring GRC registers using window 3 */ |
9127 | for (i = 0; i < 4; i++) { |
9128 | u32 reg = fw_health->regs[i]; |
9129 | |
9130 | if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) |
9131 | continue; |
9132 | if (reg_base == 0xffffffff) |
9133 | reg_base = reg & BNXT_GRC_BASE_MASK; |
9134 | if ((reg & BNXT_GRC_BASE_MASK) != reg_base) |
9135 | return -ERANGE; |
9136 | fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); |
9137 | } |
9138 | bp->fw_health->status_reliable = true; |
9139 | bp->fw_health->resets_reliable = true; |
9140 | if (reg_base == 0xffffffff) |
9141 | return 0; |
9142 | |
__bnxt_map_fw_health_reg(bp, reg_base);
9144 | return 0; |
9145 | } |
9146 | |
9147 | static void bnxt_remap_fw_health_regs(struct bnxt *bp) |
9148 | { |
9149 | if (!bp->fw_health) |
9150 | return; |
9151 | |
9152 | if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { |
9153 | bp->fw_health->status_reliable = true; |
9154 | bp->fw_health->resets_reliable = true; |
9155 | } else { |
9156 | bnxt_try_map_fw_health_reg(bp); |
9157 | } |
9158 | } |
9159 | |
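/* Fetch the firmware error recovery policy: polling intervals, wait
 * times, the health/heartbeat/reset-count register locations and the
 * reset sequence, then pre-map the GRC registers used for health
 * monitoring.
 */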
9160 | static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) |
9161 | { |
9162 | struct bnxt_fw_health *fw_health = bp->fw_health; |
9163 | struct hwrm_error_recovery_qcfg_output *resp; |
9164 | struct hwrm_error_recovery_qcfg_input *req; |
9165 | int rc, i; |
9166 | |
9167 | if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
9168 | return 0; |
9169 | |
9170 | rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); |
9171 | if (rc) |
9172 | return rc; |
9173 | |
9174 | resp = hwrm_req_hold(bp, req); |
9175 | rc = hwrm_req_send(bp, req); |
9176 | if (rc) |
9177 | goto err_recovery_out; |
9178 | fw_health->flags = le32_to_cpu(resp->flags); |
9179 | if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && |
9180 | !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { |
9181 | rc = -EINVAL; |
9182 | goto err_recovery_out; |
9183 | } |
9184 | fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); |
9185 | fw_health->master_func_wait_dsecs = |
9186 | le32_to_cpu(resp->master_func_wait_period); |
9187 | fw_health->normal_func_wait_dsecs = |
9188 | le32_to_cpu(resp->normal_func_wait_period); |
9189 | fw_health->post_reset_wait_dsecs = |
9190 | le32_to_cpu(resp->master_func_wait_period_after_reset); |
9191 | fw_health->post_reset_max_wait_dsecs = |
9192 | le32_to_cpu(resp->max_bailout_time_after_reset); |
9193 | fw_health->regs[BNXT_FW_HEALTH_REG] = |
9194 | le32_to_cpu(resp->fw_health_status_reg); |
9195 | fw_health->regs[BNXT_FW_HEARTBEAT_REG] = |
9196 | le32_to_cpu(resp->fw_heartbeat_reg); |
9197 | fw_health->regs[BNXT_FW_RESET_CNT_REG] = |
9198 | le32_to_cpu(resp->fw_reset_cnt_reg); |
9199 | fw_health->regs[BNXT_FW_RESET_INPROG_REG] = |
9200 | le32_to_cpu(resp->reset_inprogress_reg); |
9201 | fw_health->fw_reset_inprog_reg_mask = |
9202 | le32_to_cpu(resp->reset_inprogress_reg_mask); |
9203 | fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; |
9204 | if (fw_health->fw_reset_seq_cnt >= 16) { |
9205 | rc = -EINVAL; |
9206 | goto err_recovery_out; |
9207 | } |
9208 | for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { |
9209 | fw_health->fw_reset_seq_regs[i] = |
9210 | le32_to_cpu(resp->reset_reg[i]); |
9211 | fw_health->fw_reset_seq_vals[i] = |
9212 | le32_to_cpu(resp->reset_reg_val[i]); |
9213 | fw_health->fw_reset_seq_delay_msec[i] = |
9214 | resp->delay_after_reset[i]; |
9215 | } |
9216 | err_recovery_out: |
9217 | hwrm_req_drop(bp, req); |
9218 | if (!rc) |
9219 | rc = bnxt_map_fw_health_regs(bp); |
9220 | if (rc) |
9221 | bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; |
9222 | return rc; |
9223 | } |
9224 | |
9225 | static int bnxt_hwrm_func_reset(struct bnxt *bp) |
9226 | { |
9227 | struct hwrm_func_reset_input *req; |
9228 | int rc; |
9229 | |
9230 | rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); |
9231 | if (rc) |
9232 | return rc; |
9233 | |
9234 | req->enables = 0; |
9235 | hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); |
9236 | return hwrm_req_send(bp, req); |
9237 | } |
9238 | |
9239 | static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) |
9240 | { |
9241 | struct hwrm_nvm_get_dev_info_output nvm_info; |
9242 | |
if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
9245 | nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, |
9246 | nvm_info.nvm_cfg_ver_upd); |
9247 | } |
9248 | |
9249 | static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) |
9250 | { |
9251 | struct hwrm_queue_qportcfg_output *resp; |
9252 | struct hwrm_queue_qportcfg_input *req; |
9253 | u8 i, j, *qptr; |
9254 | bool no_rdma; |
9255 | int rc = 0; |
9256 | |
9257 | rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); |
9258 | if (rc) |
9259 | return rc; |
9260 | |
9261 | resp = hwrm_req_hold(bp, req); |
9262 | rc = hwrm_req_send(bp, req); |
9263 | if (rc) |
9264 | goto qportcfg_exit; |
9265 | |
9266 | if (!resp->max_configurable_queues) { |
9267 | rc = -EINVAL; |
9268 | goto qportcfg_exit; |
9269 | } |
9270 | bp->max_tc = resp->max_configurable_queues; |
9271 | bp->max_lltc = resp->max_configurable_lossless_queues; |
9272 | if (bp->max_tc > BNXT_MAX_QUEUE) |
9273 | bp->max_tc = BNXT_MAX_QUEUE; |
9274 | |
9275 | no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); |
9276 | qptr = &resp->queue_id0; |
9277 | for (i = 0, j = 0; i < bp->max_tc; i++) { |
9278 | bp->q_info[j].queue_id = *qptr; |
9279 | bp->q_ids[i] = *qptr++; |
9280 | bp->q_info[j].queue_profile = *qptr++; |
9281 | bp->tc_to_qidx[j] = j; |
9282 | if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || |
9283 | (no_rdma && BNXT_PF(bp))) |
9284 | j++; |
9285 | } |
9286 | bp->max_q = bp->max_tc; |
9287 | bp->max_tc = max_t(u8, j, 1); |
9288 | |
9289 | if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) |
9290 | bp->max_tc = 1; |
9291 | |
9292 | if (bp->max_lltc > bp->max_tc) |
9293 | bp->max_lltc = bp->max_tc; |
9294 | |
9295 | qportcfg_exit: |
9296 | hwrm_req_drop(bp, req); |
9297 | return rc; |
9298 | } |
9299 | |
9300 | static int bnxt_hwrm_poll(struct bnxt *bp) |
9301 | { |
9302 | struct hwrm_ver_get_input *req; |
9303 | int rc; |
9304 | |
9305 | rc = hwrm_req_init(bp, req, HWRM_VER_GET); |
9306 | if (rc) |
9307 | return rc; |
9308 | |
9309 | req->hwrm_intf_maj = HWRM_VERSION_MAJOR; |
9310 | req->hwrm_intf_min = HWRM_VERSION_MINOR; |
9311 | req->hwrm_intf_upd = HWRM_VERSION_UPDATE; |
9312 | |
hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
9314 | rc = hwrm_req_send(bp, req); |
9315 | return rc; |
9316 | } |
9317 | |
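/* Query the firmware version: record the HWRM spec code and version
 * strings, command timeouts, maximum request lengths, chip number and
 * device capability flags.
 */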
9318 | static int bnxt_hwrm_ver_get(struct bnxt *bp) |
9319 | { |
9320 | struct hwrm_ver_get_output *resp; |
9321 | struct hwrm_ver_get_input *req; |
9322 | u16 fw_maj, fw_min, fw_bld, fw_rsv; |
9323 | u32 dev_caps_cfg, hwrm_ver; |
9324 | int rc, len; |
9325 | |
9326 | rc = hwrm_req_init(bp, req, HWRM_VER_GET); |
9327 | if (rc) |
9328 | return rc; |
9329 | |
hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9331 | bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; |
9332 | req->hwrm_intf_maj = HWRM_VERSION_MAJOR; |
9333 | req->hwrm_intf_min = HWRM_VERSION_MINOR; |
9334 | req->hwrm_intf_upd = HWRM_VERSION_UPDATE; |
9335 | |
9336 | resp = hwrm_req_hold(bp, req); |
9337 | rc = hwrm_req_send(bp, req); |
9338 | if (rc) |
9339 | goto hwrm_ver_get_exit; |
9340 | |
9341 | memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); |
9342 | |
9343 | bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | |
9344 | resp->hwrm_intf_min_8b << 8 | |
9345 | resp->hwrm_intf_upd_8b; |
9346 | if (resp->hwrm_intf_maj_8b < 1) { |
9347 | netdev_warn(dev: bp->dev, format: "HWRM interface %d.%d.%d is older than 1.0.0.\n" , |
9348 | resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, |
9349 | resp->hwrm_intf_upd_8b); |
9350 | netdev_warn(dev: bp->dev, format: "Please update firmware with HWRM interface 1.0.0 or newer.\n" ); |
9351 | } |
9352 | |
9353 | hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | |
9354 | HWRM_VERSION_UPDATE; |
9355 | |
9356 | if (bp->hwrm_spec_code > hwrm_ver) |
snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9358 | HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, |
9359 | HWRM_VERSION_UPDATE); |
9360 | else |
snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
9362 | resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, |
9363 | resp->hwrm_intf_upd_8b); |
9364 | |
9365 | fw_maj = le16_to_cpu(resp->hwrm_fw_major); |
9366 | if (bp->hwrm_spec_code > 0x10803 && fw_maj) { |
9367 | fw_min = le16_to_cpu(resp->hwrm_fw_minor); |
9368 | fw_bld = le16_to_cpu(resp->hwrm_fw_build); |
9369 | fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); |
9370 | len = FW_VER_STR_LEN; |
9371 | } else { |
9372 | fw_maj = resp->hwrm_fw_maj_8b; |
9373 | fw_min = resp->hwrm_fw_min_8b; |
9374 | fw_bld = resp->hwrm_fw_bld_8b; |
9375 | fw_rsv = resp->hwrm_fw_rsvd_8b; |
9376 | len = BC_HWRM_STR_LEN; |
9377 | } |
9378 | bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); |
snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
9380 | fw_rsv); |
9381 | |
9382 | if (strlen(resp->active_pkg_name)) { |
9383 | int fw_ver_len = strlen(bp->fw_ver_str); |
9384 | |
snprintf(bp->fw_ver_str + fw_ver_len,
FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
9387 | resp->active_pkg_name); |
9388 | bp->fw_cap |= BNXT_FW_CAP_PKG_VER; |
9389 | } |
9390 | |
9391 | bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); |
9392 | if (!bp->hwrm_cmd_timeout) |
9393 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
9394 | bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; |
9395 | if (!bp->hwrm_cmd_max_timeout) |
9396 | bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; |
9397 | else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) |
9398 | netdev_warn(dev: bp->dev, format: "Device requests max timeout of %d seconds, may trigger hung task watchdog\n" , |
9399 | bp->hwrm_cmd_max_timeout / 1000); |
9400 | |
9401 | if (resp->hwrm_intf_maj_8b >= 1) { |
9402 | bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); |
9403 | bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); |
9404 | } |
9405 | if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) |
9406 | bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; |
9407 | |
9408 | bp->chip_num = le16_to_cpu(resp->chip_num); |
9409 | bp->chip_rev = resp->chip_rev; |
9410 | if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && |
9411 | !resp->chip_metal) |
9412 | bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; |
9413 | |
9414 | dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); |
9415 | if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && |
9416 | (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) |
9417 | bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; |
9418 | |
9419 | if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) |
9420 | bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; |
9421 | |
9422 | if (dev_caps_cfg & |
9423 | VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) |
9424 | bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; |
9425 | |
9426 | if (dev_caps_cfg & |
9427 | VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) |
9428 | bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; |
9429 | |
9430 | if (dev_caps_cfg & |
9431 | VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) |
9432 | bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; |
9433 | |
9434 | hwrm_ver_get_exit: |
9435 | hwrm_req_drop(bp, req); |
9436 | return rc; |
9437 | } |
9438 | |
9439 | int bnxt_hwrm_fw_set_time(struct bnxt *bp) |
9440 | { |
9441 | struct hwrm_fw_set_time_input *req; |
9442 | struct tm tm; |
9443 | time64_t now = ktime_get_real_seconds(); |
9444 | int rc; |
9445 | |
9446 | if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || |
9447 | bp->hwrm_spec_code < 0x10400) |
9448 | return -EOPNOTSUPP; |
9449 | |
time64_to_tm(now, 0, &tm);
9451 | rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); |
9452 | if (rc) |
9453 | return rc; |
9454 | |
9455 | req->year = cpu_to_le16(1900 + tm.tm_year); |
9456 | req->month = 1 + tm.tm_mon; |
9457 | req->day = tm.tm_mday; |
9458 | req->hour = tm.tm_hour; |
9459 | req->minute = tm.tm_min; |
9460 | req->second = tm.tm_sec; |
9461 | return hwrm_req_send(bp, req); |
9462 | } |
9463 | |
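/* Accumulate a hardware counter of width @mask into the 64-bit software
 * counter, compensating for wrap-around of the narrower hardware value.
 */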
9464 | static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) |
9465 | { |
9466 | u64 sw_tmp; |
9467 | |
9468 | hw &= mask; |
9469 | sw_tmp = (*sw & ~mask) | hw; |
9470 | if (hw < (*sw & mask)) |
9471 | sw_tmp += mask + 1; |
9472 | WRITE_ONCE(*sw, sw_tmp); |
9473 | } |
9474 | |
9475 | static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, |
9476 | int count, bool ignore_zero) |
9477 | { |
9478 | int i; |
9479 | |
9480 | for (i = 0; i < count; i++) { |
9481 | u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); |
9482 | |
9483 | if (ignore_zero && !hw) |
9484 | continue; |
9485 | |
9486 | if (masks[i] == -1ULL) |
9487 | sw_stats[i] = hw; |
9488 | else |
bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
9490 | } |
9491 | } |
9492 | |
9493 | static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) |
9494 | { |
9495 | if (!stats->hw_stats) |
9496 | return; |
9497 | |
__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
stats->hw_masks, stats->len / 8, false);
9500 | } |
9501 | |
9502 | static void bnxt_accumulate_all_stats(struct bnxt *bp) |
9503 | { |
9504 | struct bnxt_stats_mem *ring0_stats; |
9505 | bool ignore_zero = false; |
9506 | int i; |
9507 | |
9508 | /* Chip bug. Counter intermittently becomes 0. */ |
9509 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
9510 | ignore_zero = true; |
9511 | |
9512 | for (i = 0; i < bp->cp_nr_rings; i++) { |
9513 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
9514 | struct bnxt_cp_ring_info *cpr; |
9515 | struct bnxt_stats_mem *stats; |
9516 | |
9517 | cpr = &bnapi->cp_ring; |
9518 | stats = &cpr->stats; |
9519 | if (!i) |
9520 | ring0_stats = stats; |
__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
ring0_stats->hw_masks,
ring0_stats->len / 8, ignore_zero);
9524 | } |
9525 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
9526 | struct bnxt_stats_mem *stats = &bp->port_stats; |
9527 | __le64 *hw_stats = stats->hw_stats; |
9528 | u64 *sw_stats = stats->sw_stats; |
9529 | u64 *masks = stats->hw_masks; |
9530 | int cnt; |
9531 | |
9532 | cnt = sizeof(struct rx_port_stats) / 8; |
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9534 | |
9535 | hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
9536 | sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
9537 | masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
9538 | cnt = sizeof(struct tx_port_stats) / 8; |
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
9540 | } |
9541 | if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { |
bnxt_accumulate_stats(&bp->rx_port_stats_ext);
bnxt_accumulate_stats(&bp->tx_port_stats_ext);
9544 | } |
9545 | } |
9546 | |
9547 | static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) |
9548 | { |
9549 | struct hwrm_port_qstats_input *req; |
9550 | struct bnxt_pf_info *pf = &bp->pf; |
9551 | int rc; |
9552 | |
9553 | if (!(bp->flags & BNXT_FLAG_PORT_STATS)) |
9554 | return 0; |
9555 | |
9556 | if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
9557 | return -EOPNOTSUPP; |
9558 | |
9559 | rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); |
9560 | if (rc) |
9561 | return rc; |
9562 | |
9563 | req->flags = flags; |
9564 | req->port_id = cpu_to_le16(pf->port_id); |
9565 | req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + |
9566 | BNXT_TX_PORT_STATS_BYTE_OFFSET); |
9567 | req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); |
9568 | return hwrm_req_send(bp, req); |
9569 | } |
9570 | |
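/* Request extended port statistics and, when the reported sizes indicate
 * support, the priority-to-CoS queue mapping used for per-priority
 * counters.
 */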
9571 | static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) |
9572 | { |
9573 | struct hwrm_queue_pri2cos_qcfg_output *resp_qc; |
9574 | struct hwrm_queue_pri2cos_qcfg_input *req_qc; |
9575 | struct hwrm_port_qstats_ext_output *resp_qs; |
9576 | struct hwrm_port_qstats_ext_input *req_qs; |
9577 | struct bnxt_pf_info *pf = &bp->pf; |
9578 | u32 tx_stat_size; |
9579 | int rc; |
9580 | |
9581 | if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) |
9582 | return 0; |
9583 | |
9584 | if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
9585 | return -EOPNOTSUPP; |
9586 | |
9587 | rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); |
9588 | if (rc) |
9589 | return rc; |
9590 | |
9591 | req_qs->flags = flags; |
9592 | req_qs->port_id = cpu_to_le16(pf->port_id); |
9593 | req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); |
9594 | req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); |
9595 | tx_stat_size = bp->tx_port_stats_ext.hw_stats ? |
9596 | sizeof(struct tx_port_stats_ext) : 0; |
9597 | req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); |
9598 | req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); |
resp_qs = hwrm_req_hold(bp, req_qs);
rc = hwrm_req_send(bp, req_qs);
9601 | if (!rc) { |
9602 | bp->fw_rx_stats_ext_size = |
9603 | le16_to_cpu(resp_qs->rx_stat_size) / 8; |
9604 | if (BNXT_FW_MAJ(bp) < 220 && |
9605 | bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) |
9606 | bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; |
9607 | |
9608 | bp->fw_tx_stats_ext_size = tx_stat_size ? |
9609 | le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; |
9610 | } else { |
9611 | bp->fw_rx_stats_ext_size = 0; |
9612 | bp->fw_tx_stats_ext_size = 0; |
9613 | } |
hwrm_req_drop(bp, req_qs);
9615 | |
9616 | if (flags) |
9617 | return rc; |
9618 | |
9619 | if (bp->fw_tx_stats_ext_size <= |
9620 | offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { |
9621 | bp->pri2cos_valid = 0; |
9622 | return rc; |
9623 | } |
9624 | |
9625 | rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); |
9626 | if (rc) |
9627 | return rc; |
9628 | |
9629 | req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); |
9630 | |
resp_qc = hwrm_req_hold(bp, req_qc);
rc = hwrm_req_send(bp, req_qc);
9633 | if (!rc) { |
9634 | u8 *pri2cos; |
9635 | int i, j; |
9636 | |
9637 | pri2cos = &resp_qc->pri0_cos_queue_id; |
9638 | for (i = 0; i < 8; i++) { |
9639 | u8 queue_id = pri2cos[i]; |
9640 | u8 queue_idx; |
9641 | |
9642 | /* Per port queue IDs start from 0, 10, 20, etc */ |
9643 | queue_idx = queue_id % 10; |
9644 | if (queue_idx > BNXT_MAX_QUEUE) { |
9645 | bp->pri2cos_valid = false; |
hwrm_req_drop(bp, req_qc);
9647 | return rc; |
9648 | } |
9649 | for (j = 0; j < bp->max_q; j++) { |
9650 | if (bp->q_ids[j] == queue_id) |
9651 | bp->pri2cos_idx[i] = queue_idx; |
9652 | } |
9653 | } |
9654 | bp->pri2cos_valid = true; |
9655 | } |
hwrm_req_drop(bp, req_qc);
9657 | |
9658 | return rc; |
9659 | } |
9660 | |
9661 | static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) |
9662 | { |
9663 | bnxt_hwrm_tunnel_dst_port_free(bp, |
9664 | TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
9665 | bnxt_hwrm_tunnel_dst_port_free(bp, |
9666 | TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
9667 | } |
9668 | |
9669 | static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) |
9670 | { |
9671 | int rc, i; |
9672 | u32 tpa_flags = 0; |
9673 | |
9674 | if (set_tpa) |
9675 | tpa_flags = bp->flags & BNXT_FLAG_TPA; |
9676 | else if (BNXT_NO_FW_ACCESS(bp)) |
9677 | return 0; |
9678 | for (i = 0; i < bp->nr_vnics; i++) { |
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
9680 | if (rc) { |
9681 | netdev_err(dev: bp->dev, format: "hwrm vnic set tpa failure rc for vnic %d: %x\n" , |
9682 | i, rc); |
9683 | return rc; |
9684 | } |
9685 | } |
9686 | return 0; |
9687 | } |
9688 | |
static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
9690 | { |
9691 | int i; |
9692 | |
9693 | for (i = 0; i < bp->nr_vnics; i++) |
		bnxt_hwrm_vnic_set_rss(bp, i, false);
9695 | } |
9696 | |
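/* Tear down VNIC state in the order the chip family requires: on pre-P5
 * chips the RSS contexts must be cleared and freed before the VNICs,
 * while on P5+ chips the contexts are freed after the VNICs.
 */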
9697 | static void bnxt_clear_vnic(struct bnxt *bp) |
9698 | { |
9699 | if (!bp->vnic_info) |
9700 | return; |
9701 | |
9702 | bnxt_hwrm_clear_vnic_filter(bp); |
9703 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { |
		/* clear all RSS settings before freeing the vnic ctx */
9705 | bnxt_hwrm_clear_vnic_rss(bp); |
9706 | bnxt_hwrm_vnic_ctx_free(bp); |
9707 | } |
	/* before freeing the vnic, undo the vnic tpa settings */
9709 | if (bp->flags & BNXT_FLAG_TPA) |
		bnxt_set_tpa(bp, false);
9711 | bnxt_hwrm_vnic_free(bp); |
9712 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
9713 | bnxt_hwrm_vnic_ctx_free(bp); |
9714 | } |
9715 | |
9716 | static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, |
9717 | bool irq_re_init) |
9718 | { |
9719 | bnxt_clear_vnic(bp); |
9720 | bnxt_hwrm_ring_free(bp, close_path); |
9721 | bnxt_hwrm_ring_grp_free(bp); |
9722 | if (irq_re_init) { |
9723 | bnxt_hwrm_stat_ctx_free(bp); |
9724 | bnxt_hwrm_free_tunnel_ports(bp); |
9725 | } |
9726 | } |
9727 | |
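/* Set the bridge mode for this function by mapping the netlink bridge
 * mode (VEB or VEPA) onto the firmware's corresponding EVB mode.
 */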
9728 | static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) |
9729 | { |
9730 | struct hwrm_func_cfg_input *req; |
9731 | u8 evb_mode; |
9732 | int rc; |
9733 | |
9734 | if (br_mode == BRIDGE_MODE_VEB) |
9735 | evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; |
9736 | else if (br_mode == BRIDGE_MODE_VEPA) |
9737 | evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; |
9738 | else |
9739 | return -EINVAL; |
9740 | |
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9742 | if (rc) |
9743 | return rc; |
9744 | |
9745 | req->fid = cpu_to_le16(0xffff); |
9746 | req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); |
9747 | req->evb_mode = evb_mode; |
9748 | return hwrm_req_send(bp, req); |
9749 | } |
9750 | |
9751 | static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) |
9752 | { |
9753 | struct hwrm_func_cfg_input *req; |
9754 | int rc; |
9755 | |
9756 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) |
9757 | return 0; |
9758 | |
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
9760 | if (rc) |
9761 | return rc; |
9762 | |
9763 | req->fid = cpu_to_le16(0xffff); |
9764 | req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); |
9765 | req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; |
9766 | if (size == 128) |
9767 | req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; |
9768 | |
9769 | return hwrm_req_send(bp, req); |
9770 | } |
9771 | |
9772 | static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
9773 | { |
9774 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
9775 | int rc; |
9776 | |
9777 | if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) |
9778 | goto skip_rss_ctx; |
9779 | |
9780 | /* allocate context for vnic */ |
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
9785 | goto vnic_setup_err; |
9786 | } |
9787 | bp->rsscos_nr_ctxs++; |
9788 | |
9789 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
9794 | goto vnic_setup_err; |
9795 | } |
9796 | bp->rsscos_nr_ctxs++; |
9797 | } |
9798 | |
9799 | : |
9800 | /* configure default vnic, ring grp */ |
9801 | rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); |
9802 | if (rc) { |
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
9805 | goto vnic_setup_err; |
9806 | } |
9807 | |
9808 | /* Enable RSS hashing on vnic */ |
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
9813 | goto vnic_setup_err; |
9814 | } |
9815 | |
9816 | if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
9817 | rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); |
9818 | if (rc) { |
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
9821 | } |
9822 | } |
9823 | |
9824 | vnic_setup_err: |
9825 | return rc; |
9826 | } |
9827 | |
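/* VNIC setup for P5+ chips, which spread the RSS indirection table
 * across multiple contexts: allocate however many contexts
 * bnxt_get_nr_rss_ctxs() reports before programming RSS and the VNIC.
 */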
9828 | static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) |
9829 | { |
9830 | int rc, i, nr_ctxs; |
9831 | |
	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
9833 | for (i = 0; i < nr_ctxs; i++) { |
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
				   vnic_id, i, rc);
9838 | break; |
9839 | } |
9840 | bp->rsscos_nr_ctxs++; |
9841 | } |
9842 | if (i < nr_ctxs) |
9843 | return -ENOMEM; |
9844 | |
	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
			   vnic_id, rc);
9849 | return rc; |
9850 | } |
9851 | rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); |
9852 | if (rc) { |
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
9855 | return rc; |
9856 | } |
9857 | if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
9858 | rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); |
9859 | if (rc) { |
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
9862 | } |
9863 | } |
9864 | return rc; |
9865 | } |
9866 | |
9867 | static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
9868 | { |
9869 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
9870 | return __bnxt_setup_vnic_p5(bp, vnic_id); |
9871 | else |
9872 | return __bnxt_setup_vnic(bp, vnic_id); |
9873 | } |
9874 | |
9875 | static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, |
9876 | u16 start_rx_ring_idx, int rx_rings) |
9877 | { |
9878 | int rc; |
9879 | |
	rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
9884 | return rc; |
9885 | } |
9886 | return bnxt_setup_vnic(bp, vnic_id); |
9887 | } |
9888 | |
9889 | static int bnxt_alloc_rfs_vnics(struct bnxt *bp) |
9890 | { |
9891 | int i, rc = 0; |
9892 | |
9893 | if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) |
		return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
						 bp->rx_nr_rings);
9896 | |
9897 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
9898 | return 0; |
9899 | |
9900 | for (i = 0; i < bp->rx_nr_rings; i++) { |
9901 | struct bnxt_vnic_info *vnic; |
9902 | u16 vnic_id = i + 1; |
9903 | u16 ring_id = i; |
9904 | |
9905 | if (vnic_id >= bp->nr_vnics) |
9906 | break; |
9907 | |
9908 | vnic = &bp->vnic_info[vnic_id]; |
9909 | vnic->flags |= BNXT_VNIC_RFS_FLAG; |
9910 | if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) |
9911 | vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; |
		if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
9913 | break; |
9914 | } |
9915 | return rc; |
9916 | } |
9917 | |
9918 | /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ |
9919 | static bool bnxt_promisc_ok(struct bnxt *bp) |
9920 | { |
9921 | #ifdef CONFIG_BNXT_SRIOV |
	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
9923 | return false; |
9924 | #endif |
9925 | return true; |
9926 | } |
9927 | |
9928 | static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) |
9929 | { |
9930 | unsigned int rc = 0; |
9931 | |
	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
9936 | return rc; |
9937 | } |
9938 | |
	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
9943 | return rc; |
9944 | } |
9945 | return rc; |
9946 | } |
9947 | |
9948 | static int bnxt_cfg_rx_mode(struct bnxt *); |
9949 | static bool bnxt_mc_list_updated(struct bnxt *, u32 *); |
9950 | |
9951 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) |
9952 | { |
9953 | struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
9954 | int rc = 0; |
9955 | unsigned int rx_nr_rings = bp->rx_nr_rings; |
9956 | |
9957 | if (irq_re_init) { |
9958 | rc = bnxt_hwrm_stat_ctx_alloc(bp); |
9959 | if (rc) { |
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
9962 | goto err_out; |
9963 | } |
9964 | } |
9965 | |
9966 | rc = bnxt_hwrm_ring_alloc(bp); |
9967 | if (rc) { |
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
9969 | goto err_out; |
9970 | } |
9971 | |
9972 | rc = bnxt_hwrm_ring_grp_alloc(bp); |
9973 | if (rc) { |
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
9975 | goto err_out; |
9976 | } |
9977 | |
9978 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
9979 | rx_nr_rings--; |
9980 | |
9981 | /* default vnic 0 */ |
	rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
9985 | goto err_out; |
9986 | } |
9987 | |
9988 | if (BNXT_VF(bp)) |
9989 | bnxt_hwrm_func_qcfg(bp); |
9990 | |
9991 | rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); |
9992 | if (rc) |
9993 | goto err_out; |
9994 | if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) |
9995 | bnxt_hwrm_update_rss_hash_cfg(bp); |
9996 | |
9997 | if (bp->flags & BNXT_FLAG_RFS) { |
9998 | rc = bnxt_alloc_rfs_vnics(bp); |
9999 | if (rc) |
10000 | goto err_out; |
10001 | } |
10002 | |
10003 | if (bp->flags & BNXT_FLAG_TPA) { |
		rc = bnxt_set_tpa(bp, true);
10005 | if (rc) |
10006 | goto err_out; |
10007 | } |
10008 | |
10009 | if (BNXT_VF(bp)) |
10010 | bnxt_update_vf_mac(bp); |
10011 | |
10012 | /* Filter for default vnic 0 */ |
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
10014 | if (rc) { |
10015 | if (BNXT_VF(bp) && rc == -ENODEV) |
			netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
		else
			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
10019 | goto err_out; |
10020 | } |
10021 | vnic->uc_filter_count = 1; |
10022 | |
10023 | vnic->rx_mask = 0; |
10024 | if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) |
10025 | goto skip_rx_mask; |
10026 | |
10027 | if (bp->dev->flags & IFF_BROADCAST) |
10028 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; |
10029 | |
10030 | if (bp->dev->flags & IFF_PROMISC) |
10031 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
10032 | |
10033 | if (bp->dev->flags & IFF_ALLMULTI) { |
10034 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
10035 | vnic->mc_list_count = 0; |
10036 | } else if (bp->dev->flags & IFF_MULTICAST) { |
10037 | u32 mask = 0; |
10038 | |
10039 | bnxt_mc_list_updated(bp, &mask); |
10040 | vnic->rx_mask |= mask; |
10041 | } |
10042 | |
10043 | rc = bnxt_cfg_rx_mode(bp); |
10044 | if (rc) |
10045 | goto err_out; |
10046 | |
10047 | skip_rx_mask: |
10048 | rc = bnxt_hwrm_set_coal(bp); |
10049 | if (rc) |
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);
10052 | |
10053 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
10054 | rc = bnxt_setup_nitroa0_vnic(bp); |
10055 | if (rc) |
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
10058 | } |
10059 | |
10060 | if (BNXT_VF(bp)) { |
10061 | bnxt_hwrm_func_qcfg(bp); |
		netdev_update_features(bp->dev);
10063 | } |
10064 | |
10065 | return 0; |
10066 | |
10067 | err_out: |
	bnxt_hwrm_resource_free(bp, 0, true);
10069 | |
10070 | return rc; |
10071 | } |
10072 | |
10073 | static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) |
10074 | { |
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
10076 | return 0; |
10077 | } |
10078 | |
10079 | static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) |
10080 | { |
10081 | bnxt_init_cp_rings(bp); |
10082 | bnxt_init_rx_rings(bp); |
10083 | bnxt_init_tx_rings(bp); |
10084 | bnxt_init_ring_grps(bp, irq_re_init); |
10085 | bnxt_init_vnics(bp); |
10086 | |
10087 | return bnxt_init_chip(bp, irq_re_init); |
10088 | } |
10089 | |
10090 | static int bnxt_set_real_num_queues(struct bnxt *bp) |
10091 | { |
10092 | int rc; |
10093 | struct net_device *dev = bp->dev; |
10094 | |
	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
10097 | if (rc) |
10098 | return rc; |
10099 | |
	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
10101 | if (rc) |
10102 | return rc; |
10103 | |
10104 | #ifdef CONFIG_RFS_ACCEL |
10105 | if (bp->flags & BNXT_FLAG_RFS) |
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
10107 | #endif |
10108 | |
10109 | return rc; |
10110 | } |
10111 | |
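/* Trim the requested RX and TX ring counts to fit within @max completion
 * rings. With shared rings each count is clamped to @max independently;
 * otherwise the larger count is decremented until rx + tx <= max, e.g.
 * rx = 8, tx = 8, max = 12 trims to rx = 6, tx = 6.
 */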
10112 | static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
10113 | bool shared) |
10114 | { |
10115 | int _rx = *rx, _tx = *tx; |
10116 | |
10117 | if (shared) { |
10118 | *rx = min_t(int, _rx, max); |
10119 | *tx = min_t(int, _tx, max); |
10120 | } else { |
10121 | if (max < 2) |
10122 | return -ENOMEM; |
10123 | |
10124 | while (_rx + _tx > max) { |
10125 | if (_rx > _tx && _rx > 1) |
10126 | _rx--; |
10127 | else if (_tx > 1) |
10128 | _tx--; |
10129 | } |
10130 | *rx = _rx; |
10131 | *tx = _tx; |
10132 | } |
10133 | return 0; |
10134 | } |
10135 | |
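/* Convert a TX ring count to the number of completion rings it consumes.
 * TX rings are grouped into one set per traffic class and the sets share
 * completion rings, while XDP TX rings map one to one. Example: tx = 9
 * with 2 TCs and 1 XDP ring -> (9 - 1) / 2 + 1 = 5 completion rings.
 */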
10136 | static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) |
10137 | { |
10138 | return (tx - tx_xdp) / tx_sets + tx_xdp; |
10139 | } |
10140 | |
10141 | int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) |
10142 | { |
10143 | int tcs = bp->num_tc; |
10144 | |
10145 | if (!tcs) |
10146 | tcs = 1; |
	return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
10148 | } |
10149 | |
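/* Inverse of bnxt_num_tx_to_cp(): expand a completion ring count back to
 * the TX ring count across all traffic classes plus the XDP rings.
 */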
10150 | static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) |
10151 | { |
10152 | int tcs = bp->num_tc; |
10153 | |
10154 | return (tx_cp - bp->tx_nr_rings_xdp) * tcs + |
10155 | bp->tx_nr_rings_xdp; |
10156 | } |
10157 | |
10158 | static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
10159 | bool sh) |
10160 | { |
	int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
10162 | |
10163 | if (tx_cp != *tx) { |
10164 | int tx_saved = tx_cp, rc; |
10165 | |
		rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
10167 | if (rc) |
10168 | return rc; |
10169 | if (tx_cp != tx_saved) |
10170 | *tx = bnxt_num_cp_to_tx(bp, tx_cp); |
10171 | return 0; |
10172 | } |
	return __bnxt_trim_rings(bp, rx, tx, max, sh);
10174 | } |
10175 | |
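/* Program the TC-to-queue mapping and name the MSI-X vectors. Each
 * vector is named "<netdev>-<TxRx|rx|tx>-<index>" so its role can be
 * identified in /proc/interrupts.
 */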
10176 | static void bnxt_setup_msix(struct bnxt *bp) |
10177 | { |
10178 | const int len = sizeof(bp->irq_tbl[0].name); |
10179 | struct net_device *dev = bp->dev; |
10180 | int tcs, i; |
10181 | |
10182 | tcs = bp->num_tc; |
10183 | if (tcs) { |
10184 | int i, off, count; |
10185 | |
10186 | for (i = 0; i < tcs; i++) { |
10187 | count = bp->tx_nr_rings_per_tc; |
10188 | off = BNXT_TC_TO_RING_BASE(bp, i); |
			netdev_set_tc_queue(dev, i, count, off);
10190 | } |
10191 | } |
10192 | |
10193 | for (i = 0; i < bp->cp_nr_rings; i++) { |
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10195 | char *attr; |
10196 | |
10197 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
10198 | attr = "TxRx" ; |
10199 | else if (i < bp->rx_nr_rings) |
10200 | attr = "rx" ; |
10201 | else |
10202 | attr = "tx" ; |
10203 | |
10204 | snprintf(buf: bp->irq_tbl[map_idx].name, size: len, fmt: "%s-%s-%d" , dev->name, |
10205 | attr, i); |
10206 | bp->irq_tbl[map_idx].handler = bnxt_msix; |
10207 | } |
10208 | } |
10209 | |
10210 | static void bnxt_setup_inta(struct bnxt *bp) |
10211 | { |
10212 | const int len = sizeof(bp->irq_tbl[0].name); |
10213 | |
10214 | if (bp->num_tc) { |
		netdev_reset_tc(bp->dev);
10216 | bp->num_tc = 0; |
10217 | } |
10218 | |
	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
10221 | bp->irq_tbl[0].handler = bnxt_inta; |
10222 | } |
10223 | |
10224 | static int bnxt_init_int_mode(struct bnxt *bp); |
10225 | |
10226 | static int bnxt_setup_int_mode(struct bnxt *bp) |
10227 | { |
10228 | int rc; |
10229 | |
10230 | if (!bp->irq_tbl) { |
10231 | rc = bnxt_init_int_mode(bp); |
10232 | if (rc || !bp->irq_tbl) |
10233 | return rc ?: -ENODEV; |
10234 | } |
10235 | |
10236 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
10237 | bnxt_setup_msix(bp); |
10238 | else |
10239 | bnxt_setup_inta(bp); |
10240 | |
10241 | rc = bnxt_set_real_num_queues(bp); |
10242 | return rc; |
10243 | } |
10244 | |
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
10246 | { |
10247 | return bp->hw_resc.max_rsscos_ctxs; |
10248 | } |
10249 | |
10250 | static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) |
10251 | { |
10252 | return bp->hw_resc.max_vnics; |
10253 | } |
10254 | |
10255 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) |
10256 | { |
10257 | return bp->hw_resc.max_stat_ctxs; |
10258 | } |
10259 | |
10260 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) |
10261 | { |
10262 | return bp->hw_resc.max_cp_rings; |
10263 | } |
10264 | |
10265 | static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
10266 | { |
10267 | unsigned int cp = bp->hw_resc.max_cp_rings; |
10268 | |
10269 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
10270 | cp -= bnxt_get_ulp_msix_num(bp); |
10271 | |
10272 | return cp; |
10273 | } |
10274 | |
10275 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
10276 | { |
10277 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
10278 | |
10279 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
10280 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); |
10281 | |
10282 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); |
10283 | } |
10284 | |
10285 | static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) |
10286 | { |
10287 | bp->hw_resc.max_irqs = max_irqs; |
10288 | } |
10289 | |
10290 | unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) |
10291 | { |
10292 | unsigned int cp; |
10293 | |
10294 | cp = bnxt_get_max_func_cp_rings_for_en(bp); |
10295 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
10296 | return cp - bp->rx_nr_rings - bp->tx_nr_rings; |
10297 | else |
10298 | return cp - bp->cp_nr_rings; |
10299 | } |
10300 | |
10301 | unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) |
10302 | { |
10303 | return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); |
10304 | } |
10305 | |
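/* Return how many MSI-X vectors can still be given to a ULP such as
 * RoCE. Without the new resource manager this is simply what is left
 * after the L2 completion rings; with it, the grant may also be capped
 * by the function's maximum IRQ count.
 */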
10306 | int bnxt_get_avail_msix(struct bnxt *bp, int num) |
10307 | { |
10308 | int max_cp = bnxt_get_max_func_cp_rings(bp); |
10309 | int max_irq = bnxt_get_max_func_irqs(bp); |
10310 | int total_req = bp->cp_nr_rings + num; |
10311 | int max_idx, avail_msix; |
10312 | |
10313 | max_idx = bp->total_irqs; |
10314 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
10315 | max_idx = min_t(int, bp->total_irqs, max_cp); |
10316 | avail_msix = max_idx - bp->cp_nr_rings; |
10317 | if (!BNXT_NEW_RM(bp) || avail_msix >= num) |
10318 | return avail_msix; |
10319 | |
10320 | if (max_irq < total_req) { |
10321 | num = max_irq - bp->cp_nr_rings; |
10322 | if (num <= 0) |
10323 | return 0; |
10324 | } |
10325 | return num; |
10326 | } |
10327 | |
10328 | static int bnxt_get_num_msix(struct bnxt *bp) |
10329 | { |
10330 | if (!BNXT_NEW_RM(bp)) |
10331 | return bnxt_get_max_func_irqs(bp); |
10332 | |
10333 | return bnxt_nq_rings_in_use(bp); |
10334 | } |
10335 | |
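/* Enable MSI-X and size the data path to match. If the PCI core grants
 * fewer vectors than requested, the RX/TX ring counts are trimmed to
 * fit what pci_enable_msix_range() actually returned, after setting
 * aside any vectors reserved for the ULP.
 */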
10336 | static int bnxt_init_msix(struct bnxt *bp) |
10337 | { |
10338 | int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; |
10339 | struct msix_entry *msix_ent; |
10340 | |
10341 | total_vecs = bnxt_get_num_msix(bp); |
10342 | max = bnxt_get_max_func_irqs(bp); |
10343 | if (total_vecs > max) |
10344 | total_vecs = max; |
10345 | |
10346 | if (!total_vecs) |
10347 | return 0; |
10348 | |
	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
10350 | if (!msix_ent) |
10351 | return -ENOMEM; |
10352 | |
10353 | for (i = 0; i < total_vecs; i++) { |
10354 | msix_ent[i].entry = i; |
10355 | msix_ent[i].vector = 0; |
10356 | } |
10357 | |
10358 | if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) |
10359 | min = 2; |
10360 | |
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
10362 | ulp_msix = bnxt_get_ulp_msix_num(bp); |
10363 | if (total_vecs < 0 || total_vecs < ulp_msix) { |
10364 | rc = -ENODEV; |
10365 | goto msix_setup_exit; |
10366 | } |
10367 | |
	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
10369 | if (bp->irq_tbl) { |
10370 | for (i = 0; i < total_vecs; i++) |
10371 | bp->irq_tbl[i].vector = msix_ent[i].vector; |
10372 | |
10373 | bp->total_irqs = total_vecs; |
10374 | /* Trim rings based upon num of vectors allocated */ |
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs - ulp_msix, min == 1);
10377 | if (rc) |
10378 | goto msix_setup_exit; |
10379 | |
		tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
10381 | bp->cp_nr_rings = (min == 1) ? |
10382 | max_t(int, tx_cp, bp->rx_nr_rings) : |
10383 | tx_cp + bp->rx_nr_rings; |
10384 | |
10385 | } else { |
10386 | rc = -ENOMEM; |
10387 | goto msix_setup_exit; |
10388 | } |
10389 | bp->flags |= BNXT_FLAG_USING_MSIX; |
	kfree(msix_ent);
10391 | return 0; |
10392 | |
10393 | msix_setup_exit: |
	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_disable_msix(bp->pdev);
	kfree(msix_ent);
10399 | return rc; |
10400 | } |
10401 | |
10402 | static int bnxt_init_inta(struct bnxt *bp) |
10403 | { |
	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
10405 | if (!bp->irq_tbl) |
10406 | return -ENOMEM; |
10407 | |
10408 | bp->total_irqs = 1; |
10409 | bp->rx_nr_rings = 1; |
10410 | bp->tx_nr_rings = 1; |
10411 | bp->cp_nr_rings = 1; |
10412 | bp->flags |= BNXT_FLAG_SHARED_RINGS; |
10413 | bp->irq_tbl[0].vector = bp->pdev->irq; |
10414 | return 0; |
10415 | } |
10416 | |
10417 | static int bnxt_init_int_mode(struct bnxt *bp) |
10418 | { |
10419 | int rc = -ENODEV; |
10420 | |
10421 | if (bp->flags & BNXT_FLAG_MSIX_CAP) |
10422 | rc = bnxt_init_msix(bp); |
10423 | |
10424 | if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { |
10425 | /* fallback to INTA */ |
10426 | rc = bnxt_init_inta(bp); |
10427 | } |
10428 | return rc; |
10429 | } |
10430 | |
10431 | static void bnxt_clear_int_mode(struct bnxt *bp) |
10432 | { |
10433 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
		pci_disable_msix(bp->pdev);
10435 | |
	kfree(bp->irq_tbl);
10437 | bp->irq_tbl = NULL; |
10438 | bp->flags &= ~BNXT_FLAG_USING_MSIX; |
10439 | } |
10440 | |
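/* Reserve rings with the firmware. If the required vector count changed
 * under the new resource manager, the interrupt mode is torn down and
 * re-initialized, with ULP interrupts quiesced around the transition.
 */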
10441 | int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) |
10442 | { |
10443 | bool irq_cleared = false; |
10444 | int tcs = bp->num_tc; |
10445 | int rc; |
10446 | |
10447 | if (!bnxt_need_reserve_rings(bp)) |
10448 | return 0; |
10449 | |
10450 | if (irq_re_init && BNXT_NEW_RM(bp) && |
10451 | bnxt_get_num_msix(bp) != bp->total_irqs) { |
10452 | bnxt_ulp_irq_stop(bp); |
10453 | bnxt_clear_int_mode(bp); |
10454 | irq_cleared = true; |
10455 | } |
10456 | rc = __bnxt_reserve_rings(bp); |
10457 | if (irq_cleared) { |
10458 | if (!rc) |
10459 | rc = bnxt_init_int_mode(bp); |
		bnxt_ulp_irq_restart(bp, rc);
10461 | } |
10462 | if (rc) { |
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
10464 | return rc; |
10465 | } |
10466 | if (tcs && (bp->tx_nr_rings_per_tc * tcs != |
10467 | bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { |
		netdev_err(bp->dev, "tx ring reservation failure\n");
		netdev_reset_tc(bp->dev);
10470 | bp->num_tc = 0; |
10471 | if (bp->tx_nr_rings_xdp) |
10472 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; |
10473 | else |
10474 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
10475 | return -ENOMEM; |
10476 | } |
10477 | return 0; |
10478 | } |
10479 | |
10480 | static void bnxt_free_irq(struct bnxt *bp) |
10481 | { |
10482 | struct bnxt_irq *irq; |
10483 | int i; |
10484 | |
10485 | #ifdef CONFIG_RFS_ACCEL |
	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
10487 | bp->dev->rx_cpu_rmap = NULL; |
10488 | #endif |
10489 | if (!bp->irq_tbl || !bp->bnapi) |
10490 | return; |
10491 | |
10492 | for (i = 0; i < bp->cp_nr_rings; i++) { |
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10494 | |
10495 | irq = &bp->irq_tbl[map_idx]; |
10496 | if (irq->requested) { |
10497 | if (irq->have_cpumask) { |
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
10500 | irq->have_cpumask = 0; |
10501 | } |
10502 | free_irq(irq->vector, bp->bnapi[i]); |
10503 | } |
10504 | |
10505 | irq->requested = 0; |
10506 | } |
10507 | } |
10508 | |
10509 | static int bnxt_request_irq(struct bnxt *bp) |
10510 | { |
10511 | int i, j, rc = 0; |
10512 | unsigned long flags = 0; |
10513 | #ifdef CONFIG_RFS_ACCEL |
10514 | struct cpu_rmap *rmap; |
10515 | #endif |
10516 | |
10517 | rc = bnxt_setup_int_mode(bp); |
10518 | if (rc) { |
		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
			   rc);
10521 | return rc; |
10522 | } |
10523 | #ifdef CONFIG_RFS_ACCEL |
10524 | rmap = bp->dev->rx_cpu_rmap; |
10525 | #endif |
10526 | if (!(bp->flags & BNXT_FLAG_USING_MSIX)) |
10527 | flags = IRQF_SHARED; |
10528 | |
10529 | for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { |
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
10531 | struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; |
10532 | |
10533 | #ifdef CONFIG_RFS_ACCEL |
10534 | if (rmap && bp->bnapi[i]->rx_ring) { |
			rc = irq_cpu_rmap_add(rmap, irq->vector);
10536 | if (rc) |
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
10539 | j++; |
10540 | } |
10541 | #endif |
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
10544 | if (rc) |
10545 | break; |
10546 | |
		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
10548 | irq->requested = 1; |
10549 | |
		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bp->pdev->dev);
10552 | |
10553 | irq->have_cpumask = 1; |
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
10557 | if (rc) { |
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
10561 | break; |
10562 | } |
10563 | } |
10564 | } |
10565 | return rc; |
10566 | } |
10567 | |
10568 | static void bnxt_del_napi(struct bnxt *bp) |
10569 | { |
10570 | int i; |
10571 | |
10572 | if (!bp->bnapi) |
10573 | return; |
10574 | |
10575 | for (i = 0; i < bp->rx_nr_rings; i++) |
		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
10579 | |
10580 | for (i = 0; i < bp->cp_nr_rings; i++) { |
10581 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
10582 | |
		__netif_napi_del(&bnapi->napi);
10584 | } |
	/* Since we called __netif_napi_del(), we need
10586 | * to respect an RCU grace period before freeing napi structures. |
10587 | */ |
10588 | synchronize_net(); |
10589 | } |
10590 | |
10591 | static void bnxt_init_napi(struct bnxt *bp) |
10592 | { |
10593 | int i; |
10594 | unsigned int cp_nr_rings = bp->cp_nr_rings; |
10595 | struct bnxt_napi *bnapi; |
10596 | |
10597 | if (bp->flags & BNXT_FLAG_USING_MSIX) { |
10598 | int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; |
10599 | |
10600 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
10601 | poll_fn = bnxt_poll_p5; |
10602 | else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
10603 | cp_nr_rings--; |
10604 | for (i = 0; i < cp_nr_rings; i++) { |
10605 | bnapi = bp->bnapi[i]; |
			netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
10607 | } |
10608 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
10609 | bnapi = bp->bnapi[cp_nr_rings]; |
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll_nitroa0);
10612 | } |
10613 | } else { |
10614 | bnapi = bp->bnapi[0]; |
		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
10616 | } |
10617 | } |
10618 | |
10619 | static void bnxt_disable_napi(struct bnxt *bp) |
10620 | { |
10621 | int i; |
10622 | |
10623 | if (!bp->bnapi || |
	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
10625 | return; |
10626 | |
10627 | for (i = 0; i < bp->cp_nr_rings; i++) { |
10628 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
10629 | struct bnxt_cp_ring_info *cpr; |
10630 | |
10631 | cpr = &bnapi->cp_ring; |
10632 | if (bnapi->tx_fault) |
10633 | cpr->sw_stats.tx.tx_resets++; |
10634 | if (bnapi->in_reset) |
10635 | cpr->sw_stats.rx.rx_resets++; |
		napi_disable(&bnapi->napi);
		if (bnapi->rx_ring)
			cancel_work_sync(&cpr->dim.work);
10639 | } |
10640 | } |
10641 | |
10642 | static void bnxt_enable_napi(struct bnxt *bp) |
10643 | { |
10644 | int i; |
10645 | |
	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
10647 | for (i = 0; i < bp->cp_nr_rings; i++) { |
10648 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
10649 | struct bnxt_cp_ring_info *cpr; |
10650 | |
10651 | bnapi->tx_fault = 0; |
10652 | |
10653 | cpr = &bnapi->cp_ring; |
10654 | bnapi->in_reset = false; |
10655 | |
10656 | if (bnapi->rx_ring) { |
10657 | INIT_WORK(&cpr->dim.work, bnxt_dim_work); |
10658 | cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
10659 | } |
		napi_enable(&bnapi->napi);
10661 | } |
10662 | } |
10663 | |
10664 | void bnxt_tx_disable(struct bnxt *bp) |
10665 | { |
10666 | int i; |
10667 | struct bnxt_tx_ring_info *txr; |
10668 | |
10669 | if (bp->tx_ring) { |
10670 | for (i = 0; i < bp->tx_nr_rings; i++) { |
10671 | txr = &bp->tx_ring[i]; |
10672 | WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); |
10673 | } |
10674 | } |
10675 | /* Make sure napi polls see @dev_state change */ |
10676 | synchronize_net(); |
10677 | /* Drop carrier first to prevent TX timeout */ |
	netif_carrier_off(bp->dev);
10679 | /* Stop all TX queues */ |
	netif_tx_disable(bp->dev);
10681 | } |
10682 | |
10683 | void bnxt_tx_enable(struct bnxt *bp) |
10684 | { |
10685 | int i; |
10686 | struct bnxt_tx_ring_info *txr; |
10687 | |
10688 | for (i = 0; i < bp->tx_nr_rings; i++) { |
10689 | txr = &bp->tx_ring[i]; |
10690 | WRITE_ONCE(txr->dev_state, 0); |
10691 | } |
10692 | /* Make sure napi polls see @dev_state change */ |
10693 | synchronize_net(); |
	netif_tx_wake_all_queues(bp->dev);
	if (BNXT_LINK_IS_UP(bp))
		netif_carrier_on(bp->dev);
10697 | } |
10698 | |
10699 | static char *bnxt_report_fec(struct bnxt_link_info *link_info) |
10700 | { |
10701 | u8 active_fec = link_info->active_fec_sig_mode & |
10702 | PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; |
10703 | |
10704 | switch (active_fec) { |
10705 | default: |
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		return "None";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		return "Clause 74 BaseR";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
		return "Clause 91 RS(528,514)";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
		return "Clause 91 RS544_1XN";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		return "Clause 91 RS(544,514)";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
		return "Clause 91 RS272_1XN";
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		return "Clause 91 RS(272,257)";
10720 | } |
10721 | } |
10722 | |
10723 | void bnxt_report_link(struct bnxt *bp) |
10724 | { |
10725 | if (BNXT_LINK_IS_UP(bp)) { |
10726 | const char *signal = "" ; |
10727 | const char *flow_ctrl; |
10728 | const char *duplex; |
10729 | u32 speed; |
10730 | u16 fec; |
10731 | |
		netif_carrier_on(bp->dev);
		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
		if (speed == SPEED_UNKNOWN) {
			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
10736 | return; |
10737 | } |
10738 | if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) |
10739 | duplex = "full" ; |
10740 | else |
10741 | duplex = "half" ; |
10742 | if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) |
10743 | flow_ctrl = "ON - receive & transmit" ; |
10744 | else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) |
10745 | flow_ctrl = "ON - transmit" ; |
10746 | else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) |
10747 | flow_ctrl = "ON - receive" ; |
10748 | else |
10749 | flow_ctrl = "none" ; |
10750 | if (bp->link_info.phy_qcfg_resp.option_flags & |
10751 | PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { |
10752 | u8 sig_mode = bp->link_info.active_fec_sig_mode & |
10753 | PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; |
10754 | switch (sig_mode) { |
10755 | case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: |
10756 | signal = "(NRZ) " ; |
10757 | break; |
10758 | case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: |
10759 | signal = "(PAM4 56Gbps) " ; |
10760 | break; |
10761 | case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: |
10762 | signal = "(PAM4 112Gbps) " ; |
10763 | break; |
10764 | default: |
10765 | break; |
10766 | } |
10767 | } |
		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
			    speed, signal, duplex, flow_ctrl);
10770 | if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) |
			netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
				    "not active");
10774 | fec = bp->link_info.fec_cfg; |
10775 | if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) |
			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
				    bnxt_report_fec(&bp->link_info));
10779 | } else { |
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
10782 | } |
10783 | } |
10784 | |
10785 | static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) |
10786 | { |
10787 | if (!resp->supported_speeds_auto_mode && |
10788 | !resp->supported_speeds_force_mode && |
10789 | !resp->supported_pam4_speeds_auto_mode && |
10790 | !resp->supported_pam4_speeds_force_mode && |
10791 | !resp->supported_speeds2_auto_mode && |
10792 | !resp->supported_speeds2_force_mode) |
10793 | return true; |
10794 | return false; |
10795 | } |
10796 | |
10797 | static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) |
10798 | { |
10799 | struct bnxt_link_info *link_info = &bp->link_info; |
10800 | struct hwrm_port_phy_qcaps_output *resp; |
10801 | struct hwrm_port_phy_qcaps_input *req; |
10802 | int rc = 0; |
10803 | |
10804 | if (bp->hwrm_spec_code < 0x10201) |
10805 | return 0; |
10806 | |
10807 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); |
10808 | if (rc) |
10809 | return rc; |
10810 | |
10811 | resp = hwrm_req_hold(bp, req); |
10812 | rc = hwrm_req_send(bp, req); |
10813 | if (rc) |
10814 | goto hwrm_phy_qcaps_exit; |
10815 | |
10816 | bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); |
10817 | if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { |
10818 | struct ethtool_keee *eee = &bp->eee; |
10819 | u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); |
10820 | |
		_bnxt_fw_to_linkmode(eee->supported, fw_speeds);
10822 | bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & |
10823 | PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; |
10824 | bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & |
10825 | PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; |
10826 | } |
10827 | |
10828 | if (bp->hwrm_spec_code >= 0x10a01) { |
10829 | if (bnxt_phy_qcaps_no_speed(resp)) { |
10830 | link_info->phy_state = BNXT_PHY_STATE_DISABLED; |
			netdev_warn(bp->dev, "Ethernet link disabled\n");
10832 | } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { |
10833 | link_info->phy_state = BNXT_PHY_STATE_ENABLED; |
			netdev_info(bp->dev, "Ethernet link enabled\n");
10835 | /* Phy re-enabled, reprobe the speeds */ |
10836 | link_info->support_auto_speeds = 0; |
10837 | link_info->support_pam4_auto_speeds = 0; |
10838 | link_info->support_auto_speeds2 = 0; |
10839 | } |
10840 | } |
10841 | if (resp->supported_speeds_auto_mode) |
10842 | link_info->support_auto_speeds = |
10843 | le16_to_cpu(resp->supported_speeds_auto_mode); |
10844 | if (resp->supported_pam4_speeds_auto_mode) |
10845 | link_info->support_pam4_auto_speeds = |
10846 | le16_to_cpu(resp->supported_pam4_speeds_auto_mode); |
10847 | if (resp->supported_speeds2_auto_mode) |
10848 | link_info->support_auto_speeds2 = |
10849 | le16_to_cpu(resp->supported_speeds2_auto_mode); |
10850 | |
10851 | bp->port_count = resp->port_cnt; |
10852 | |
10853 | hwrm_phy_qcaps_exit: |
10854 | hwrm_req_drop(bp, req); |
10855 | return rc; |
10856 | } |
10857 | |
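/* Return true if @advertising contains any speed bit that is missing
 * from @supported. The XOR isolates the differing bits; OR-ing them back
 * into @supported only changes it if some bits came solely from
 * @advertising. Example: advertising = 0x6, supported = 0x4 ->
 * diff = 0x2 and (0x4 | 0x2) != 0x4, so a speed was dropped.
 */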
10858 | static bool bnxt_support_dropped(u16 advertising, u16 supported) |
10859 | { |
10860 | u16 diff = advertising ^ supported; |
10861 | |
10862 | return ((supported | diff) != supported); |
10863 | } |
10864 | |
10865 | static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) |
10866 | { |
10867 | struct bnxt *bp = container_of(link_info, struct bnxt, link_info); |
10868 | |
10869 | /* Check if any advertised speeds are no longer supported. The caller |
10870 | * holds the link_lock mutex, so we can modify link_info settings. |
10871 | */ |
10872 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
		if (bnxt_support_dropped(link_info->advertising,
					 link_info->support_auto_speeds2)) {
10875 | link_info->advertising = link_info->support_auto_speeds2; |
10876 | return true; |
10877 | } |
10878 | return false; |
10879 | } |
	if (bnxt_support_dropped(link_info->advertising,
				 link_info->support_auto_speeds)) {
10882 | link_info->advertising = link_info->support_auto_speeds; |
10883 | return true; |
10884 | } |
	if (bnxt_support_dropped(link_info->advertising_pam4,
				 link_info->support_pam4_auto_speeds)) {
10887 | link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; |
10888 | return true; |
10889 | } |
10890 | return false; |
10891 | } |
10892 | |
10893 | int bnxt_update_link(struct bnxt *bp, bool chng_link_state) |
10894 | { |
10895 | struct bnxt_link_info *link_info = &bp->link_info; |
10896 | struct hwrm_port_phy_qcfg_output *resp; |
10897 | struct hwrm_port_phy_qcfg_input *req; |
10898 | u8 link_state = link_info->link_state; |
10899 | bool support_changed; |
10900 | int rc; |
10901 | |
10902 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); |
10903 | if (rc) |
10904 | return rc; |
10905 | |
10906 | resp = hwrm_req_hold(bp, req); |
10907 | rc = hwrm_req_send(bp, req); |
10908 | if (rc) { |
10909 | hwrm_req_drop(bp, req); |
10910 | if (BNXT_VF(bp) && rc == -ENODEV) { |
			netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
10912 | rc = 0; |
10913 | } |
10914 | return rc; |
10915 | } |
10916 | |
10917 | memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); |
10918 | link_info->phy_link_status = resp->link; |
10919 | link_info->duplex = resp->duplex_cfg; |
10920 | if (bp->hwrm_spec_code >= 0x10800) |
10921 | link_info->duplex = resp->duplex_state; |
10922 | link_info->pause = resp->pause; |
10923 | link_info->auto_mode = resp->auto_mode; |
10924 | link_info->auto_pause_setting = resp->auto_pause; |
10925 | link_info->lp_pause = resp->link_partner_adv_pause; |
10926 | link_info->force_pause_setting = resp->force_pause; |
10927 | link_info->duplex_setting = resp->duplex_cfg; |
10928 | if (link_info->phy_link_status == BNXT_LINK_LINK) { |
10929 | link_info->link_speed = le16_to_cpu(resp->link_speed); |
10930 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) |
10931 | link_info->active_lanes = resp->active_lanes; |
10932 | } else { |
10933 | link_info->link_speed = 0; |
10934 | link_info->active_lanes = 0; |
10935 | } |
10936 | link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); |
10937 | link_info->force_pam4_link_speed = |
10938 | le16_to_cpu(resp->force_pam4_link_speed); |
10939 | link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); |
10940 | link_info->support_speeds = le16_to_cpu(resp->support_speeds); |
10941 | link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); |
10942 | link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); |
10943 | link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); |
10944 | link_info->auto_pam4_link_speeds = |
10945 | le16_to_cpu(resp->auto_pam4_link_speed_mask); |
10946 | link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); |
10947 | link_info->lp_auto_link_speeds = |
10948 | le16_to_cpu(resp->link_partner_adv_speeds); |
10949 | link_info->lp_auto_pam4_link_speeds = |
10950 | resp->link_partner_pam4_adv_speeds; |
10951 | link_info->preemphasis = le32_to_cpu(resp->preemphasis); |
10952 | link_info->phy_ver[0] = resp->phy_maj; |
10953 | link_info->phy_ver[1] = resp->phy_min; |
10954 | link_info->phy_ver[2] = resp->phy_bld; |
10955 | link_info->media_type = resp->media_type; |
10956 | link_info->phy_type = resp->phy_type; |
10957 | link_info->transceiver = resp->xcvr_pkg_type; |
10958 | link_info->phy_addr = resp->eee_config_phy_addr & |
10959 | PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; |
10960 | link_info->module_status = resp->module_status; |
10961 | |
10962 | if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { |
10963 | struct ethtool_keee *eee = &bp->eee; |
10964 | u16 fw_speeds; |
10965 | |
10966 | eee->eee_active = 0; |
10967 | if (resp->eee_config_phy_addr & |
10968 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { |
10969 | eee->eee_active = 1; |
10970 | fw_speeds = le16_to_cpu( |
10971 | resp->link_partner_adv_eee_link_speed_mask); |
			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
10973 | } |
10974 | |
10975 | /* Pull initial EEE config */ |
10976 | if (!chng_link_state) { |
10977 | if (resp->eee_config_phy_addr & |
10978 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) |
10979 | eee->eee_enabled = 1; |
10980 | |
10981 | fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); |
			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
10983 | |
10984 | if (resp->eee_config_phy_addr & |
10985 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { |
10986 | __le32 tmr; |
10987 | |
10988 | eee->tx_lpi_enabled = 1; |
10989 | tmr = resp->xcvr_identifier_type_tx_lpi_timer; |
10990 | eee->tx_lpi_timer = le32_to_cpu(tmr) & |
10991 | PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; |
10992 | } |
10993 | } |
10994 | } |
10995 | |
10996 | link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; |
10997 | if (bp->hwrm_spec_code >= 0x10504) { |
10998 | link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); |
10999 | link_info->active_fec_sig_mode = resp->active_fec_signal_mode; |
11000 | } |
11001 | /* TODO: need to add more logic to report VF link */ |
11002 | if (chng_link_state) { |
11003 | if (link_info->phy_link_status == BNXT_LINK_LINK) |
11004 | link_info->link_state = BNXT_LINK_STATE_UP; |
11005 | else |
11006 | link_info->link_state = BNXT_LINK_STATE_DOWN; |
11007 | if (link_state != link_info->link_state) |
11008 | bnxt_report_link(bp); |
11009 | } else { |
		/* always report link down if not required to update link state */
11011 | link_info->link_state = BNXT_LINK_STATE_DOWN; |
11012 | } |
11013 | hwrm_req_drop(bp, req); |
11014 | |
11015 | if (!BNXT_PHY_CFG_ABLE(bp)) |
11016 | return 0; |
11017 | |
11018 | support_changed = bnxt_support_speed_dropped(link_info); |
11019 | if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) |
11020 | bnxt_hwrm_set_link_setting(bp, true, false); |
11021 | return 0; |
11022 | } |
11023 | |
11024 | static void bnxt_get_port_module_status(struct bnxt *bp) |
11025 | { |
11026 | struct bnxt_link_info *link_info = &bp->link_info; |
11027 | struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; |
11028 | u8 module_status; |
11029 | |
	if (bnxt_update_link(bp, true))
11031 | return; |
11032 | |
11033 | module_status = link_info->module_status; |
11034 | switch (module_status) { |
11035 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: |
11036 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: |
11037 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: |
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
11040 | if (bp->hwrm_spec_code >= 0x10201) { |
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
11043 | } |
11044 | if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) |
			netdev_warn(bp->dev, "TX is disabled\n");
11046 | if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) |
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
11048 | } |
11049 | } |
11050 | |
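/* Translate the requested flow control settings into a PORT_PHY_CFG
 * request: advertise pause through autoneg when flow control autoneg
 * is enabled, otherwise force the RX/TX pause bits directly.
 */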
11051 | static void |
11052 | bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) |
11053 | { |
11054 | if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { |
11055 | if (bp->hwrm_spec_code >= 0x10201) |
11056 | req->auto_pause = |
11057 | PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; |
11058 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) |
11059 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; |
11060 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) |
11061 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; |
11062 | req->enables |= |
11063 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); |
11064 | } else { |
11065 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) |
11066 | req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; |
11067 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) |
11068 | req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; |
11069 | req->enables |= |
11070 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); |
11071 | if (bp->hwrm_spec_code >= 0x10201) { |
11072 | req->auto_pause = req->force_pause; |
11073 | req->enables |= cpu_to_le32( |
11074 | PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); |
11075 | } |
11076 | } |
11077 | } |
11078 | |
11079 | static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) |
11080 | { |
11081 | if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { |
11082 | req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; |
11083 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
11084 | req->enables |= |
11085 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); |
11086 | req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); |
11087 | } else if (bp->link_info.advertising) { |
11088 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); |
11089 | req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); |
11090 | } |
11091 | if (bp->link_info.advertising_pam4) { |
11092 | req->enables |= |
11093 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); |
11094 | req->auto_link_pam4_speed_mask = |
11095 | cpu_to_le16(bp->link_info.advertising_pam4); |
11096 | } |
11097 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); |
11098 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); |
11099 | } else { |
11100 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); |
11101 | if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { |
11102 | req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); |
11103 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); |
			netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
				   (u32)bp->link_info.req_link_speed);
11106 | } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { |
11107 | req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
11108 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); |
11109 | } else { |
11110 | req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
11111 | } |
11112 | } |
11113 | |
11114 | /* tell chimp that the setting takes effect immediately */ |
11115 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); |
11116 | } |
11117 | |
11118 | int bnxt_hwrm_set_pause(struct bnxt *bp) |
11119 | { |
11120 | struct hwrm_port_phy_cfg_input *req; |
11121 | int rc; |
11122 | |
11123 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
11124 | if (rc) |
11125 | return rc; |
11126 | |
11127 | bnxt_hwrm_set_pause_common(bp, req); |
11128 | |
11129 | if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || |
11130 | bp->link_info.force_link_chng) |
11131 | bnxt_hwrm_set_link_common(bp, req); |
11132 | |
11133 | rc = hwrm_req_send(bp, req); |
11134 | if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { |
11135 | /* since changing of pause setting doesn't trigger any link |
11136 | * change event, the driver needs to update the current pause |
		 * result upon successful return of the phy_cfg command
11138 | */ |
11139 | bp->link_info.pause = |
11140 | bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; |
11141 | bp->link_info.auto_pause_setting = 0; |
11142 | if (!bp->link_info.force_link_chng) |
11143 | bnxt_report_link(bp); |
11144 | } |
11145 | bp->link_info.force_link_chng = false; |
11146 | return rc; |
11147 | } |
11148 | |
11149 | static void bnxt_hwrm_set_eee(struct bnxt *bp, |
11150 | struct hwrm_port_phy_cfg_input *req) |
11151 | { |
11152 | struct ethtool_keee *eee = &bp->eee; |
11153 | |
11154 | if (eee->eee_enabled) { |
11155 | u16 eee_speeds; |
11156 | u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; |
11157 | |
11158 | if (eee->tx_lpi_enabled) |
11159 | flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; |
11160 | else |
11161 | flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; |
11162 | |
11163 | req->flags |= cpu_to_le32(flags); |
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
11165 | req->eee_link_speed_mask = cpu_to_le16(eee_speeds); |
11166 | req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); |
11167 | } else { |
11168 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); |
11169 | } |
11170 | } |
11171 | |
11172 | int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) |
11173 | { |
11174 | struct hwrm_port_phy_cfg_input *req; |
11175 | int rc; |
11176 | |
11177 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
11178 | if (rc) |
11179 | return rc; |
11180 | |
11181 | if (set_pause) |
11182 | bnxt_hwrm_set_pause_common(bp, req); |
11183 | |
11184 | bnxt_hwrm_set_link_common(bp, req); |
11185 | |
11186 | if (set_eee) |
11187 | bnxt_hwrm_set_eee(bp, req); |
11188 | return hwrm_req_send(bp, req); |
11189 | } |
11190 | |
11191 | static int bnxt_hwrm_shutdown_link(struct bnxt *bp) |
11192 | { |
11193 | struct hwrm_port_phy_cfg_input *req; |
11194 | int rc; |
11195 | |
11196 | if (!BNXT_SINGLE_PF(bp)) |
11197 | return 0; |
11198 | |
	if (pci_num_vf(bp->pdev) &&
11200 | !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) |
11201 | return 0; |
11202 | |
11203 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
11204 | if (rc) |
11205 | return rc; |
11206 | |
11207 | req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); |
11208 | rc = hwrm_req_send(bp, req); |
11209 | if (!rc) { |
11210 | mutex_lock(&bp->link_lock); |
		/* Device is not obliged to link down in certain scenarios, even
11212 | * when forced. Setting the state unknown is consistent with |
11213 | * driver startup and will force link state to be reported |
11214 | * during subsequent open based on PORT_PHY_QCFG. |
11215 | */ |
11216 | bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; |
		mutex_unlock(&bp->link_lock);
11218 | } |
11219 | return rc; |
11220 | } |
11221 | |
11222 | static int bnxt_fw_reset_via_optee(struct bnxt *bp) |
11223 | { |
11224 | #ifdef CONFIG_TEE_BNXT_FW |
11225 | int rc = tee_bnxt_fw_load(); |
11226 | |
11227 | if (rc) |
		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11229 | |
11230 | return rc; |
11231 | #else |
11232 | netdev_err(bp->dev, "OP-TEE not supported\n" ); |
11233 | return -ENODEV; |
11234 | #endif |
11235 | } |
11236 | |
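/* Poll the firmware health register until the firmware leaves the
 * booting/recovering states, then report whether it came up healthy.
 * If the firmware crashed with no master function to reset it, a
 * reload via OP-TEE is requested instead.
 */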
11237 | static int bnxt_try_recover_fw(struct bnxt *bp) |
11238 | { |
11239 | if (bp->fw_health && bp->fw_health->status_reliable) { |
11240 | int retry = 0, rc; |
11241 | u32 sts; |
11242 | |
11243 | do { |
11244 | sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
11245 | rc = bnxt_hwrm_poll(bp); |
11246 | if (!BNXT_FW_IS_BOOTING(sts) && |
11247 | !BNXT_FW_IS_RECOVERING(sts)) |
11248 | break; |
11249 | retry++; |
11250 | } while (rc == -EBUSY && retry < BNXT_FW_RETRY); |
11251 | |
11252 | if (!BNXT_FW_IS_HEALTHY(sts)) { |
			netdev_err(bp->dev,
				   "Firmware not responding, status: 0x%x\n",
				   sts);
11256 | rc = -ENODEV; |
11257 | } |
11258 | if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { |
			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11260 | return bnxt_fw_reset_via_optee(bp); |
11261 | } |
11262 | return rc; |
11263 | } |
11264 | |
11265 | return -ENODEV; |
11266 | } |
11267 | |
11268 | static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) |
11269 | { |
11270 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
11271 | |
11272 | if (!BNXT_NEW_RM(bp)) |
11273 | return; /* no resource reservations required */ |
11274 | |
11275 | hw_resc->resv_cp_rings = 0; |
11276 | hw_resc->resv_stat_ctxs = 0; |
11277 | hw_resc->resv_irqs = 0; |
11278 | hw_resc->resv_tx_rings = 0; |
11279 | hw_resc->resv_rx_rings = 0; |
11280 | hw_resc->resv_hw_ring_grps = 0; |
11281 | hw_resc->resv_vnics = 0; |
11282 | hw_resc->resv_rsscos_ctxs = 0; |
11283 | if (!fw_reset) { |
11284 | bp->tx_nr_rings = 0; |
11285 | bp->rx_nr_rings = 0; |
11286 | } |
11287 | } |
11288 | |
11289 | int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) |
11290 | { |
11291 | int rc; |
11292 | |
11293 | if (!BNXT_NEW_RM(bp)) |
11294 | return 0; /* no resource reservations required */ |
11295 | |
	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
	if (rc)
		netdev_err(bp->dev, "resc_qcaps failed\n");
11299 | |
11300 | bnxt_clear_reservations(bp, fw_reset); |
11301 | |
11302 | return rc; |
11303 | } |
11304 | |
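/* Notify the firmware that the interface is going up or down. On the
 * "up" transition the response flags indicate whether resources were
 * re-provisioned or a firmware reset completed while the interface was
 * down; in that case context memory, the interrupt mode and the ring
 * reservations must all be rebuilt before the open can proceed.
 */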
11305 | static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) |
11306 | { |
11307 | struct hwrm_func_drv_if_change_output *resp; |
11308 | struct hwrm_func_drv_if_change_input *req; |
11309 | bool fw_reset = !bp->irq_tbl; |
11310 | bool resc_reinit = false; |
11311 | int rc, retry = 0; |
11312 | u32 flags = 0; |
11313 | |
11314 | if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) |
11315 | return 0; |
11316 | |
11317 | rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); |
11318 | if (rc) |
11319 | return rc; |
11320 | |
11321 | if (up) |
11322 | req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); |
11323 | resp = hwrm_req_hold(bp, req); |
11324 | |
	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
11326 | while (retry < BNXT_FW_IF_RETRY) { |
11327 | rc = hwrm_req_send(bp, req); |
11328 | if (rc != -EAGAIN) |
11329 | break; |
11330 | |
		msleep(50);
11332 | retry++; |
11333 | } |
11334 | |
11335 | if (rc == -EAGAIN) { |
11336 | hwrm_req_drop(bp, req); |
11337 | return rc; |
11338 | } else if (!rc) { |
11339 | flags = le32_to_cpu(resp->flags); |
11340 | } else if (up) { |
11341 | rc = bnxt_try_recover_fw(bp); |
11342 | fw_reset = true; |
11343 | } |
11344 | hwrm_req_drop(bp, req); |
11345 | if (rc) |
11346 | return rc; |
11347 | |
11348 | if (!up) { |
11349 | bnxt_inv_fw_health_reg(bp); |
11350 | return 0; |
11351 | } |
11352 | |
11353 | if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) |
11354 | resc_reinit = true; |
11355 | if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || |
11356 | test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) |
11357 | fw_reset = true; |
11358 | else |
11359 | bnxt_remap_fw_health_regs(bp); |
11360 | |
11361 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { |
11362 | netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); |
11363 | set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
11364 | return -ENODEV; |
11365 | } |
11366 | if (resc_reinit || fw_reset) { |
11367 | if (fw_reset) { |
11368 | set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); |
11369 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
11370 | bnxt_ulp_stop(bp); |
11371 | bnxt_free_ctx_mem(bp); |
11372 | bnxt_dcb_free(bp); |
11373 | rc = bnxt_fw_init_one(bp); |
11374 | if (rc) { |
11375 | clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); |
11376 | set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
11377 | return rc; |
11378 | } |
11379 | bnxt_clear_int_mode(bp); |
11380 | rc = bnxt_init_int_mode(bp); |
11381 | if (rc) { |
11382 | clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); |
11383 | netdev_err(bp->dev, "init int mode failed\n"); |
11384 | return rc; |
11385 | } |
11386 | } |
11387 | rc = bnxt_cancel_reservations(bp, fw_reset); |
11388 | } |
11389 | return rc; |
11390 | } |
11391 | |
11392 | static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) |
11393 | { |
11394 | struct hwrm_port_led_qcaps_output *resp; |
11395 | struct hwrm_port_led_qcaps_input *req; |
11396 | struct bnxt_pf_info *pf = &bp->pf; |
11397 | int rc; |
11398 | |
11399 | bp->num_leds = 0; |
11400 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) |
11401 | return 0; |
11402 | |
11403 | rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); |
11404 | if (rc) |
11405 | return rc; |
11406 | |
11407 | req->port_id = cpu_to_le16(pf->port_id); |
11408 | resp = hwrm_req_hold(bp, req); |
11409 | rc = hwrm_req_send(bp, req); |
11410 | if (rc) { |
11411 | hwrm_req_drop(bp, req); |
11412 | return rc; |
11413 | } |
11414 | if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { |
11415 | int i; |
11416 | |
11417 | bp->num_leds = resp->num_leds; |
11418 | memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * |
11419 | bp->num_leds); |
11420 | for (i = 0; i < bp->num_leds; i++) { |
11421 | struct bnxt_led_info *led = &bp->leds[i]; |
11422 | __le16 caps = led->led_state_caps; |
11423 | |
11424 | if (!led->led_group_id || |
11425 | !BNXT_LED_ALT_BLINK_CAP(caps)) { |
11426 | bp->num_leds = 0; |
11427 | break; |
11428 | } |
11429 | } |
11430 | } |
11431 | hwrm_req_drop(bp, req); |
11432 | return 0; |
11433 | } |
11434 | |
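/* Allocate a magic-packet Wake-on-LAN filter in the firmware for this
 * port's current MAC address and cache the returned filter ID so the
 * filter can be freed later.
 */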
11435 | int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) |
11436 | { |
11437 | struct hwrm_wol_filter_alloc_output *resp; |
11438 | struct hwrm_wol_filter_alloc_input *req; |
11439 | int rc; |
11440 | |
11441 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); |
11442 | if (rc) |
11443 | return rc; |
11444 | |
11445 | req->port_id = cpu_to_le16(bp->pf.port_id); |
11446 | req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; |
11447 | req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); |
11448 | memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); |
11449 | |
11450 | resp = hwrm_req_hold(bp, req); |
11451 | rc = hwrm_req_send(bp, req); |
11452 | if (!rc) |
11453 | bp->wol_filter_id = resp->wol_filter_id; |
11454 | hwrm_req_drop(bp, req); |
11455 | return rc; |
11456 | } |
11457 | |
11458 | int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) |
11459 | { |
11460 | struct hwrm_wol_filter_free_input *req; |
11461 | int rc; |
11462 | |
11463 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); |
11464 | if (rc) |
11465 | return rc; |
11466 | |
11467 | req->port_id = cpu_to_le16(bp->pf.port_id); |
11468 | req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); |
11469 | req->wol_filter_id = bp->wol_filter_id; |
11470 | |
11471 | return hwrm_req_send(bp, req); |
11472 | } |
11473 | |
11474 | static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) |
11475 | { |
11476 | struct hwrm_wol_filter_qcfg_output *resp; |
11477 | struct hwrm_wol_filter_qcfg_input *req; |
11478 | u16 next_handle = 0; |
11479 | int rc; |
11480 | |
11481 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); |
11482 | if (rc) |
11483 | return rc; |
11484 | |
11485 | req->port_id = cpu_to_le16(bp->pf.port_id); |
11486 | req->handle = cpu_to_le16(handle); |
11487 | resp = hwrm_req_hold(bp, req); |
11488 | rc = hwrm_req_send(bp, req); |
11489 | if (!rc) { |
11490 | next_handle = le16_to_cpu(resp->next_handle); |
11491 | if (next_handle != 0) { |
11492 | if (resp->wol_type == |
11493 | WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { |
11494 | bp->wol = 1; |
11495 | bp->wol_filter_id = resp->wol_filter_id; |
11496 | } |
11497 | } |
11498 | } |
11499 | hwrm_req_drop(bp, req); |
11500 | return next_handle; |
11501 | } |
11502 | |
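/* Walk the firmware's WoL filter table for this port, one entry per
 * HWRM_WOL_FILTER_QCFG call, and latch the magic-packet filter if one
 * exists.  A next_handle of 0 or 0xffff terminates the walk.
 */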
11503 | static void bnxt_get_wol_settings(struct bnxt *bp) |
11504 | { |
11505 | u16 handle = 0; |
11506 | |
11507 | bp->wol = 0; |
11508 | if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) |
11509 | return; |
11510 | |
11511 | do { |
11512 | handle = bnxt_hwrm_get_wol_fltrs(bp, handle); |
11513 | } while (handle && handle != 0xffff); |
11514 | } |
11515 | |
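/* Validate the EEE configuration.  EEE is only meaningful when speed
 * autoneg is enabled, and the advertised EEE modes must be a subset of
 * the advertised link modes.  Returns false after fixing up the EEE
 * state if the current settings are inconsistent.
 */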
11516 | static bool bnxt_eee_config_ok(struct bnxt *bp) |
11517 | { |
11518 | struct ethtool_keee *eee = &bp->eee; |
11519 | struct bnxt_link_info *link_info = &bp->link_info; |
11520 | |
11521 | if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) |
11522 | return true; |
11523 | |
11524 | if (eee->eee_enabled) { |
11525 | __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); |
11526 | __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); |
11527 | |
11528 | _bnxt_fw_to_linkmode(advertising, link_info->advertising); |
11529 | |
11530 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
11531 | eee->eee_enabled = 0; |
11532 | return false; |
11533 | } |
11534 | if (linkmode_andnot(tmp, eee->advertised, advertising)) { |
11535 | linkmode_and(eee->advertised, advertising, |
11536 | eee->supported); |
11537 | return false; |
11538 | } |
11539 | } |
11540 | return true; |
11541 | } |
11542 | |
11543 | static int bnxt_update_phy_setting(struct bnxt *bp) |
11544 | { |
11545 | int rc; |
11546 | bool update_link = false; |
11547 | bool update_pause = false; |
11548 | bool update_eee = false; |
11549 | struct bnxt_link_info *link_info = &bp->link_info; |
11550 | |
11551 | rc = bnxt_update_link(bp, true); |
11552 | if (rc) { |
11553 | netdev_err(bp->dev, "failed to update link (rc: %x)\n", |
11554 | rc); |
11555 | return rc; |
11556 | } |
11557 | if (!BNXT_SINGLE_PF(bp)) |
11558 | return 0; |
11559 | |
11560 | if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && |
11561 | (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != |
11562 | link_info->req_flow_ctrl) |
11563 | update_pause = true; |
11564 | if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && |
11565 | link_info->force_pause_setting != link_info->req_flow_ctrl) |
11566 | update_pause = true; |
11567 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
11568 | if (BNXT_AUTO_MODE(link_info->auto_mode)) |
11569 | update_link = true; |
11570 | if (bnxt_force_speed_updated(link_info)) |
11571 | update_link = true; |
11572 | if (link_info->req_duplex != link_info->duplex_setting) |
11573 | update_link = true; |
11574 | } else { |
11575 | if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) |
11576 | update_link = true; |
11577 | if (bnxt_auto_speed_updated(link_info)) |
11578 | update_link = true; |
11579 | } |
11580 | |
11581 | /* The last close may have shut down the link, so we need to call |
11582 | * PHY_CFG to bring it back up. |
11583 | */ |
11584 | if (!BNXT_LINK_IS_UP(bp)) |
11585 | update_link = true; |
11586 | |
11587 | if (!bnxt_eee_config_ok(bp)) |
11588 | update_eee = true; |
11589 | |
11590 | if (update_link) |
11591 | rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); |
11592 | else if (update_pause) |
11593 | rc = bnxt_hwrm_set_pause(bp); |
11594 | if (rc) { |
11595 | netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", |
11596 | rc); |
11597 | return rc; |
11598 | } |
11599 | |
11600 | return rc; |
11601 | } |
11602 | |
11603 | /* Common routine to pre-map certain register blocks to different GRC windows. |
11604 | * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 |
11605 | * windows on the PF and 3 windows on the VF can be customized to map |
11606 | * different register blocks. |
11607 | */ |
11608 | static void bnxt_preset_reg_win(struct bnxt *bp) |
11609 | { |
11610 | if (BNXT_PF(bp)) { |
11611 | /* CAG registers map to GRC window #4 */ |
11612 | writel(BNXT_CAG_REG_BASE, |
11613 | bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); |
11614 | } |
11615 | } |
11616 | |
11617 | static int bnxt_init_dflt_ring_mode(struct bnxt *bp); |
11618 | |
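/* Try to recover a device previously marked with BNXT_STATE_ABORT_ERR
 * by redoing the one-time firmware init and the interrupt setup.  On
 * success, BNXT_STATE_FW_RESET_DET is set so that the subsequent open
 * performs the usual post-firmware-reset cleanup.
 */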
11619 | static int bnxt_reinit_after_abort(struct bnxt *bp) |
11620 | { |
11621 | int rc; |
11622 | |
11623 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
11624 | return -EBUSY; |
11625 | |
11626 | if (bp->dev->reg_state == NETREG_UNREGISTERED) |
11627 | return -ENODEV; |
11628 | |
11629 | rc = bnxt_fw_init_one(bp); |
11630 | if (!rc) { |
11631 | bnxt_clear_int_mode(bp); |
11632 | rc = bnxt_init_int_mode(bp); |
11633 | if (!rc) { |
11634 | clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
11635 | set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); |
11636 | } |
11637 | } |
11638 | return rc; |
11639 | } |
11640 | |
11641 | static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) |
11642 | { |
11643 | struct bnxt_ntuple_filter *ntp_fltr; |
11644 | struct bnxt_l2_filter *l2_fltr; |
11645 | |
11646 | if (list_empty(&fltr->list)) |
11647 | return; |
11648 | |
11649 | if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { |
11650 | ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); |
11651 | l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; |
11652 | atomic_inc(&l2_fltr->refcnt); |
11653 | ntp_fltr->l2_fltr = l2_fltr; |
11654 | if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { |
11655 | bnxt_del_ntp_filter(bp, ntp_fltr); |
11656 | netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", |
11657 | fltr->sw_id); |
11658 | } |
11659 | } else if (fltr->type == BNXT_FLTR_TYPE_L2) { |
11660 | l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); |
11661 | if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { |
11662 | bnxt_del_l2_filter(bp, l2_fltr); |
11663 | netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", |
11664 | fltr->sw_id); |
11665 | } |
11666 | } |
11667 | } |
11668 | |
11669 | static void bnxt_cfg_usr_fltrs(struct bnxt *bp) |
11670 | { |
11671 | struct bnxt_filter_base *usr_fltr, *tmp; |
11672 | |
11673 | list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) |
11674 | bnxt_cfg_one_usr_fltr(bp, usr_fltr); |
11675 | } |
11676 | |
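/* Core open sequence: reserve rings, allocate all ring and context
 * memory, set up NAPI and IRQs (when irq_re_init), program the NIC,
 * then enable NAPI, interrupts and the TX queues.  With link_re_init,
 * the PHY settings are also re-applied, with a timed retry if the
 * update fails.
 */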
11677 | static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
11678 | { |
11679 | int rc = 0; |
11680 | |
11681 | bnxt_preset_reg_win(bp); |
11682 | netif_carrier_off(bp->dev); |
11683 | if (irq_re_init) { |
11684 | /* Reserve rings now if none were reserved at driver probe. */ |
11685 | rc = bnxt_init_dflt_ring_mode(bp); |
11686 | if (rc) { |
11687 | netdev_err(bp->dev, "Failed to reserve default rings at open\n"); |
11688 | return rc; |
11689 | } |
11690 | } |
11691 | rc = bnxt_reserve_rings(bp, irq_re_init); |
11692 | if (rc) |
11693 | return rc; |
11694 | if ((bp->flags & BNXT_FLAG_RFS) && |
11695 | !(bp->flags & BNXT_FLAG_USING_MSIX)) { |
11696 | /* disable RFS if falling back to INTA */ |
11697 | bp->dev->hw_features &= ~NETIF_F_NTUPLE; |
11698 | bp->flags &= ~BNXT_FLAG_RFS; |
11699 | } |
11700 | |
11701 | rc = bnxt_alloc_mem(bp, irq_re_init); |
11702 | if (rc) { |
11703 | netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); |
11704 | goto open_err_free_mem; |
11705 | } |
11706 | |
11707 | if (irq_re_init) { |
11708 | bnxt_init_napi(bp); |
11709 | rc = bnxt_request_irq(bp); |
11710 | if (rc) { |
11711 | netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); |
11712 | goto open_err_irq; |
11713 | } |
11714 | } |
11715 | |
11716 | rc = bnxt_init_nic(bp, irq_re_init); |
11717 | if (rc) { |
11718 | netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); |
11719 | goto open_err_irq; |
11720 | } |
11721 | |
11722 | bnxt_enable_napi(bp); |
11723 | bnxt_debug_dev_init(bp); |
11724 | |
11725 | if (link_re_init) { |
11726 | mutex_lock(&bp->link_lock); |
11727 | rc = bnxt_update_phy_setting(bp); |
11728 | mutex_unlock(&bp->link_lock); |
11729 | if (rc) { |
11730 | netdev_warn(bp->dev, "failed to update phy settings\n"); |
11731 | if (BNXT_SINGLE_PF(bp)) { |
11732 | bp->link_info.phy_retry = true; |
11733 | bp->link_info.phy_retry_expires = |
11734 | jiffies + 5 * HZ; |
11735 | } |
11736 | } |
11737 | } |
11738 | |
11739 | if (irq_re_init) |
11740 | udp_tunnel_nic_reset_ntf(bp->dev); |
11741 | |
11742 | if (bp->tx_nr_rings_xdp < num_possible_cpus()) { |
11743 | if (!static_key_enabled(&bnxt_xdp_locking_key)) |
11744 | static_branch_enable(&bnxt_xdp_locking_key); |
11745 | } else if (static_key_enabled(&bnxt_xdp_locking_key)) { |
11746 | static_branch_disable(&bnxt_xdp_locking_key); |
11747 | } |
11748 | set_bit(BNXT_STATE_OPEN, &bp->state); |
11749 | bnxt_enable_int(bp); |
11750 | /* Enable TX queues */ |
11751 | bnxt_tx_enable(bp); |
11752 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
11753 | /* Poll link status and check for SFP+ module status */ |
11754 | mutex_lock(&bp->link_lock); |
11755 | bnxt_get_port_module_status(bp); |
11756 | mutex_unlock(&bp->link_lock); |
11757 | |
11758 | /* VF-reps may need to be re-opened after the PF is re-opened */ |
11759 | if (BNXT_PF(bp)) |
11760 | bnxt_vf_reps_open(bp); |
11761 | if (bp->ptp_cfg) |
11762 | atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); |
11763 | bnxt_ptp_init_rtc(bp, true); |
11764 | bnxt_ptp_cfg_tstamp_filters(bp); |
11765 | bnxt_cfg_usr_fltrs(bp); |
11766 | return 0; |
11767 | |
11768 | open_err_irq: |
11769 | bnxt_del_napi(bp); |
11770 | |
11771 | open_err_free_mem: |
11772 | bnxt_free_skbs(bp); |
11773 | bnxt_free_irq(bp); |
11774 | bnxt_free_mem(bp, true); |
11775 | return rc; |
11776 | } |
11777 | |
11778 | /* rtnl_lock held */ |
11779 | int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
11780 | { |
11781 | int rc = 0; |
11782 | |
11783 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) |
11784 | rc = -EIO; |
11785 | if (!rc) |
11786 | rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); |
11787 | if (rc) { |
11788 | netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); |
11789 | dev_close(bp->dev); |
11790 | } |
11791 | return rc; |
11792 | } |
11793 | |
11794 | /* rtnl_lock held, open the NIC half way by allocating all resources, but |
11795 | * NAPI, IRQ, and TX are not enabled. This is mainly used for offline |
11796 | * self tests. |
11797 | */ |
11798 | int bnxt_half_open_nic(struct bnxt *bp) |
11799 | { |
11800 | int rc = 0; |
11801 | |
11802 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
11803 | netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); |
11804 | rc = -ENODEV; |
11805 | goto half_open_err; |
11806 | } |
11807 | |
11808 | rc = bnxt_alloc_mem(bp, true); |
11809 | if (rc) { |
11810 | netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); |
11811 | goto half_open_err; |
11812 | } |
11813 | bnxt_init_napi(bp); |
11814 | set_bit(BNXT_STATE_HALF_OPEN, &bp->state); |
11815 | rc = bnxt_init_nic(bp, true); |
11816 | if (rc) { |
11817 | clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); |
11818 | bnxt_del_napi(bp); |
11819 | netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); |
11820 | goto half_open_err; |
11821 | } |
11822 | return 0; |
11823 | |
11824 | half_open_err: |
11825 | bnxt_free_skbs(bp); |
11826 | bnxt_free_mem(bp, true); |
11827 | dev_close(bp->dev); |
11828 | return rc; |
11829 | } |
11830 | |
11831 | /* rtnl_lock held, this call can only be made after a previous successful |
11832 | * call to bnxt_half_open_nic(). |
11833 | */ |
11834 | void bnxt_half_close_nic(struct bnxt *bp) |
11835 | { |
11836 | bnxt_hwrm_resource_free(bp, false, true); |
11837 | bnxt_del_napi(bp); |
11838 | bnxt_free_skbs(bp); |
11839 | bnxt_free_mem(bp, true); |
11840 | clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); |
11841 | } |
11842 | |
11843 | void bnxt_reenable_sriov(struct bnxt *bp) |
11844 | { |
11845 | if (BNXT_PF(bp)) { |
11846 | struct bnxt_pf_info *pf = &bp->pf; |
11847 | int n = pf->active_vfs; |
11848 | |
11849 | if (n) |
11850 | bnxt_cfg_hw_sriov(bp, &n, true); |
11851 | } |
11852 | } |
11853 | |
11854 | static int bnxt_open(struct net_device *dev) |
11855 | { |
11856 | struct bnxt *bp = netdev_priv(dev); |
11857 | int rc; |
11858 | |
11859 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
11860 | rc = bnxt_reinit_after_abort(bp); |
11861 | if (rc) { |
11862 | if (rc == -EBUSY) |
11863 | netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); |
11864 | else |
11865 | netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); |
11866 | return -ENODEV; |
11867 | } |
11868 | } |
11869 | |
11870 | rc = bnxt_hwrm_if_change(bp, true); |
11871 | if (rc) |
11872 | return rc; |
11873 | |
11874 | rc = __bnxt_open_nic(bp, true, true); |
11875 | if (rc) { |
11876 | bnxt_hwrm_if_change(bp, false); |
11877 | } else { |
11878 | if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { |
11879 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
11880 | bnxt_ulp_start(bp, 0); |
11881 | bnxt_reenable_sriov(bp); |
11882 | } |
11883 | } |
11884 | } |
11885 | |
11886 | return rc; |
11887 | } |
11888 | |
11889 | static bool bnxt_drv_busy(struct bnxt *bp) |
11890 | { |
11891 | return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || |
11892 | test_bit(BNXT_STATE_READ_STATS, &bp->state)); |
11893 | } |
11894 | |
11895 | static void bnxt_get_ring_stats(struct bnxt *bp, |
11896 | struct rtnl_link_stats64 *stats); |
11897 | |
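/* Core close sequence, roughly the reverse of __bnxt_open_nic(): stop
 * TX first, clear BNXT_STATE_OPEN and wait for in-flight stats readers
 * and the sp task to drain, then shut down the rings in the firmware
 * and free host resources.  Ring stats are snapshotted before freeing
 * so the counters survive an ifdown/ifup cycle.
 */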
11898 | static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, |
11899 | bool link_re_init) |
11900 | { |
11901 | /* Close the VF-reps before closing PF */ |
11902 | if (BNXT_PF(bp)) |
11903 | bnxt_vf_reps_close(bp); |
11904 | |
11905 | /* Change device state to avoid TX queue wake-ups */ |
11906 | bnxt_tx_disable(bp); |
11907 | |
11908 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
11909 | smp_mb__after_atomic(); |
11910 | while (bnxt_drv_busy(bp)) |
11911 | msleep(20); |
11912 | |
11913 | /* Flush rings and disable interrupts */ |
11914 | bnxt_shutdown_nic(bp, irq_re_init); |
11915 | |
11916 | /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ |
11917 | |
11918 | bnxt_debug_dev_exit(bp); |
11919 | bnxt_disable_napi(bp); |
11920 | del_timer_sync(&bp->timer); |
11921 | bnxt_free_skbs(bp); |
11922 | |
11923 | /* Save ring stats before shutdown */ |
11924 | if (bp->bnapi && irq_re_init) { |
11925 | bnxt_get_ring_stats(bp, &bp->net_stats_prev); |
11926 | bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); |
11927 | } |
11928 | if (irq_re_init) { |
11929 | bnxt_free_irq(bp); |
11930 | bnxt_del_napi(bp); |
11931 | } |
11932 | bnxt_free_mem(bp, irq_re_init); |
11933 | } |
11934 | |
11935 | void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
11936 | { |
11937 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
11938 | /* If we get here, it means firmware reset is in progress |
11939 | * while we are trying to close. We can safely proceed with |
11940 | * the close because we are holding rtnl_lock(). Some firmware |
11941 | * messages may fail as we proceed to close. We set the |
11942 | * ABORT_ERR flag here so that the FW reset thread will later |
11943 | * abort when it gets the rtnl_lock() and sees the flag. |
11944 | */ |
11945 | netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); |
11946 | set_bit(BNXT_STATE_ABORT_ERR, &bp->state); |
11947 | } |
11948 | |
11949 | #ifdef CONFIG_BNXT_SRIOV |
11950 | if (bp->sriov_cfg) { |
11951 | int rc; |
11952 | |
11953 | rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, |
11954 | !bp->sriov_cfg, |
11955 | BNXT_SRIOV_CFG_WAIT_TMO); |
11956 | if (!rc) |
11957 | netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); |
11958 | else if (rc < 0) |
11959 | netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); |
11960 | } |
11961 | #endif |
11962 | __bnxt_close_nic(bp, irq_re_init, link_re_init); |
11963 | } |
11964 | |
11965 | static int bnxt_close(struct net_device *dev) |
11966 | { |
11967 | struct bnxt *bp = netdev_priv(dev); |
11968 | |
11969 | bnxt_close_nic(bp, true, true); |
11970 | bnxt_hwrm_shutdown_link(bp); |
11971 | bnxt_hwrm_if_change(bp, false); |
11972 | return 0; |
11973 | } |
11974 | |
11975 | static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, |
11976 | u16 *val) |
11977 | { |
11978 | struct hwrm_port_phy_mdio_read_output *resp; |
11979 | struct hwrm_port_phy_mdio_read_input *req; |
11980 | int rc; |
11981 | |
11982 | if (bp->hwrm_spec_code < 0x10a00) |
11983 | return -EOPNOTSUPP; |
11984 | |
11985 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); |
11986 | if (rc) |
11987 | return rc; |
11988 | |
11989 | req->port_id = cpu_to_le16(bp->pf.port_id); |
11990 | req->phy_addr = phy_addr; |
11991 | req->reg_addr = cpu_to_le16(reg & 0x1f); |
11992 | if (mdio_phy_id_is_c45(phy_addr)) { |
11993 | req->cl45_mdio = 1; |
11994 | req->phy_addr = mdio_phy_id_prtad(phy_addr); |
11995 | req->dev_addr = mdio_phy_id_devad(phy_addr); |
11996 | req->reg_addr = cpu_to_le16(reg); |
11997 | } |
11998 | |
11999 | resp = hwrm_req_hold(bp, req); |
12000 | rc = hwrm_req_send(bp, req); |
12001 | if (!rc) |
12002 | *val = le16_to_cpu(resp->reg_data); |
12003 | hwrm_req_drop(bp, req); |
12004 | return rc; |
12005 | } |
12006 | |
12007 | static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, |
12008 | u16 val) |
12009 | { |
12010 | struct hwrm_port_phy_mdio_write_input *req; |
12011 | int rc; |
12012 | |
12013 | if (bp->hwrm_spec_code < 0x10a00) |
12014 | return -EOPNOTSUPP; |
12015 | |
12016 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); |
12017 | if (rc) |
12018 | return rc; |
12019 | |
12020 | req->port_id = cpu_to_le16(bp->pf.port_id); |
12021 | req->phy_addr = phy_addr; |
12022 | req->reg_addr = cpu_to_le16(reg & 0x1f); |
12023 | if (mdio_phy_id_is_c45(phy_addr)) { |
12024 | req->cl45_mdio = 1; |
12025 | req->phy_addr = mdio_phy_id_prtad(phy_addr); |
12026 | req->dev_addr = mdio_phy_id_devad(phy_addr); |
12027 | req->reg_addr = cpu_to_le16(reg); |
12028 | } |
12029 | req->reg_data = cpu_to_le16(val); |
12030 | |
12031 | return hwrm_req_send(bp, req); |
12032 | } |
12033 | |
12034 | /* rtnl_lock held */ |
12035 | static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
12036 | { |
12037 | struct mii_ioctl_data *mdio = if_mii(ifr); |
12038 | struct bnxt *bp = netdev_priv(dev); |
12039 | int rc; |
12040 | |
12041 | switch (cmd) { |
12042 | case SIOCGMIIPHY: |
12043 | mdio->phy_id = bp->link_info.phy_addr; |
12044 | |
12045 | fallthrough; |
12046 | case SIOCGMIIREG: { |
12047 | u16 mii_regval = 0; |
12048 | |
12049 | if (!netif_running(dev)) |
12050 | return -EAGAIN; |
12051 | |
12052 | rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, |
12053 | &mii_regval); |
12054 | mdio->val_out = mii_regval; |
12055 | return rc; |
12056 | } |
12057 | |
12058 | case SIOCSMIIREG: |
12059 | if (!netif_running(dev)) |
12060 | return -EAGAIN; |
12061 | |
12062 | return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, |
12063 | mdio->val_in); |
12064 | |
12065 | case SIOCSHWTSTAMP: |
12066 | return bnxt_hwtstamp_set(dev, ifr); |
12067 | |
12068 | case SIOCGHWTSTAMP: |
12069 | return bnxt_hwtstamp_get(dev, ifr); |
12070 | |
12071 | default: |
12072 | /* do nothing */ |
12073 | break; |
12074 | } |
12075 | return -EOPNOTSUPP; |
12076 | } |
12077 | |
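/* Fold the per-completion-ring counters (the software copy of each
 * ring's hardware statistics) into the standard rtnl_link_stats64
 * counters.
 */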
12078 | static void bnxt_get_ring_stats(struct bnxt *bp, |
12079 | struct rtnl_link_stats64 *stats) |
12080 | { |
12081 | int i; |
12082 | |
12083 | for (i = 0; i < bp->cp_nr_rings; i++) { |
12084 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
12085 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
12086 | u64 *sw = cpr->stats.sw_stats; |
12087 | |
12088 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); |
12089 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
12090 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); |
12091 | |
12092 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); |
12093 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); |
12094 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); |
12095 | |
12096 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); |
12097 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); |
12098 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); |
12099 | |
12100 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); |
12101 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); |
12102 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); |
12103 | |
12104 | stats->rx_missed_errors += |
12105 | BNXT_GET_RING_STATS64(sw, rx_discard_pkts); |
12106 | |
12107 | stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
12108 | |
12109 | stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); |
12110 | |
12111 | stats->rx_dropped += |
12112 | cpr->sw_stats.rx.rx_netpoll_discards + |
12113 | cpr->sw_stats.rx.rx_oom_discards; |
12114 | } |
12115 | } |
12116 | |
12117 | static void bnxt_add_prev_stats(struct bnxt *bp, |
12118 | struct rtnl_link_stats64 *stats) |
12119 | { |
12120 | struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; |
12121 | |
12122 | stats->rx_packets += prev_stats->rx_packets; |
12123 | stats->tx_packets += prev_stats->tx_packets; |
12124 | stats->rx_bytes += prev_stats->rx_bytes; |
12125 | stats->tx_bytes += prev_stats->tx_bytes; |
12126 | stats->rx_missed_errors += prev_stats->rx_missed_errors; |
12127 | stats->multicast += prev_stats->multicast; |
12128 | stats->rx_dropped += prev_stats->rx_dropped; |
12129 | stats->tx_dropped += prev_stats->tx_dropped; |
12130 | } |
12131 | |
12132 | static void |
12133 | bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
12134 | { |
12135 | struct bnxt *bp = netdev_priv(dev); |
12136 | |
12137 | set_bit(BNXT_STATE_READ_STATS, &bp->state); |
12138 | /* Make sure bnxt_close_nic() sees that we are reading stats before |
12139 | * we check the BNXT_STATE_OPEN flag. |
12140 | */ |
12141 | smp_mb__after_atomic(); |
12142 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
12143 | clear_bit(BNXT_STATE_READ_STATS, &bp->state); |
12144 | *stats = bp->net_stats_prev; |
12145 | return; |
12146 | } |
12147 | |
12148 | bnxt_get_ring_stats(bp, stats); |
12149 | bnxt_add_prev_stats(bp, stats); |
12150 | |
12151 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
12152 | u64 *rx = bp->port_stats.sw_stats; |
12153 | u64 *tx = bp->port_stats.sw_stats + |
12154 | BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
12155 | |
12156 | stats->rx_crc_errors = |
12157 | BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); |
12158 | stats->rx_frame_errors = |
12159 | BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); |
12160 | stats->rx_length_errors = |
12161 | BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + |
12162 | BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + |
12163 | BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); |
12164 | stats->rx_errors = |
12165 | BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + |
12166 | BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); |
12167 | stats->collisions = |
12168 | BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); |
12169 | stats->tx_fifo_errors = |
12170 | BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); |
12171 | stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); |
12172 | } |
12173 | clear_bit(BNXT_STATE_READ_STATS, &bp->state); |
12174 | } |
12175 | |
12176 | static void bnxt_get_one_ring_err_stats(struct bnxt *bp, |
12177 | struct bnxt_total_ring_err_stats *stats, |
12178 | struct bnxt_cp_ring_info *cpr) |
12179 | { |
12180 | struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; |
12181 | u64 *hw_stats = cpr->stats.sw_stats; |
12182 | |
12183 | stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; |
12184 | stats->rx_total_resets += sw_stats->rx.rx_resets; |
12185 | stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; |
12186 | stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; |
12187 | stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; |
12188 | stats->rx_total_ring_discards += |
12189 | BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); |
12190 | stats->tx_total_resets += sw_stats->tx.tx_resets; |
12191 | stats->tx_total_ring_discards += |
12192 | BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); |
12193 | stats->total_missed_irqs += sw_stats->cmn.missed_irqs; |
12194 | } |
12195 | |
12196 | void bnxt_get_ring_err_stats(struct bnxt *bp, |
12197 | struct bnxt_total_ring_err_stats *stats) |
12198 | { |
12199 | int i; |
12200 | |
12201 | for (i = 0; i < bp->cp_nr_rings; i++) |
12202 | bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); |
12203 | } |
12204 | |
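/* Sync the net_device multicast list into the default VNIC's mc_list
 * and report whether anything changed.  If there are more addresses
 * than BNXT_MAX_MC_ADDRS, exact filtering is abandoned in favor of
 * ALL_MCAST mode.
 */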
12205 | static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) |
12206 | { |
12207 | struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
12208 | struct net_device *dev = bp->dev; |
12209 | struct netdev_hw_addr *ha; |
12210 | u8 *haddr; |
12211 | int mc_count = 0; |
12212 | bool update = false; |
12213 | int off = 0; |
12214 | |
12215 | netdev_for_each_mc_addr(ha, dev) { |
12216 | if (mc_count >= BNXT_MAX_MC_ADDRS) { |
12217 | *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
12218 | vnic->mc_list_count = 0; |
12219 | return false; |
12220 | } |
12221 | haddr = ha->addr; |
12222 | if (!ether_addr_equal(haddr, vnic->mc_list + off)) { |
12223 | memcpy(vnic->mc_list + off, haddr, ETH_ALEN); |
12224 | update = true; |
12225 | } |
12226 | off += ETH_ALEN; |
12227 | mc_count++; |
12228 | } |
12229 | if (mc_count) |
12230 | *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; |
12231 | |
12232 | if (mc_count != vnic->mc_list_count) { |
12233 | vnic->mc_list_count = mc_count; |
12234 | update = true; |
12235 | } |
12236 | return update; |
12237 | } |
12238 | |
12239 | static bool bnxt_uc_list_updated(struct bnxt *bp) |
12240 | { |
12241 | struct net_device *dev = bp->dev; |
12242 | struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
12243 | struct netdev_hw_addr *ha; |
12244 | int off = 0; |
12245 | |
12246 | if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) |
12247 | return true; |
12248 | |
12249 | netdev_for_each_uc_addr(ha, dev) { |
12250 | if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) |
12251 | return true; |
12252 | |
12253 | off += ETH_ALEN; |
12254 | } |
12255 | return false; |
12256 | } |
12257 | |
12258 | static void bnxt_set_rx_mode(struct net_device *dev) |
12259 | { |
12260 | struct bnxt *bp = netdev_priv(dev); |
12261 | struct bnxt_vnic_info *vnic; |
12262 | bool mc_update = false; |
12263 | bool uc_update; |
12264 | u32 mask; |
12265 | |
12266 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) |
12267 | return; |
12268 | |
12269 | vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
12270 | mask = vnic->rx_mask; |
12271 | mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | |
12272 | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | |
12273 | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | |
12274 | CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); |
12275 | |
12276 | if (dev->flags & IFF_PROMISC) |
12277 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
12278 | |
12279 | uc_update = bnxt_uc_list_updated(bp); |
12280 | |
12281 | if (dev->flags & IFF_BROADCAST) |
12282 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; |
12283 | if (dev->flags & IFF_ALLMULTI) { |
12284 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
12285 | vnic->mc_list_count = 0; |
12286 | } else if (dev->flags & IFF_MULTICAST) { |
12287 | mc_update = bnxt_mc_list_updated(bp, &mask); |
12288 | } |
12289 | |
12290 | if (mask != vnic->rx_mask || uc_update || mc_update) { |
12291 | vnic->rx_mask = mask; |
12292 | |
12293 | bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); |
12294 | } |
12295 | } |
12296 | |
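/* Push the RX mode computed by bnxt_set_rx_mode() to the firmware:
 * rebuild the unicast L2 filters (falling back to promiscuous mode if
 * there are more addresses than filters) and program the RX mask,
 * downgrading to ALL_MCAST if the multicast filter update fails.
 */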
12297 | static int bnxt_cfg_rx_mode(struct bnxt *bp) |
12298 | { |
12299 | struct net_device *dev = bp->dev; |
12300 | struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
12301 | struct netdev_hw_addr *ha; |
12302 | int i, off = 0, rc; |
12303 | bool uc_update; |
12304 | |
12305 | netif_addr_lock_bh(dev); |
12306 | uc_update = bnxt_uc_list_updated(bp); |
12307 | netif_addr_unlock_bh(dev); |
12308 | |
12309 | if (!uc_update) |
12310 | goto skip_uc; |
12311 | |
12312 | for (i = 1; i < vnic->uc_filter_count; i++) { |
12313 | struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; |
12314 | |
12315 | bnxt_hwrm_l2_filter_free(bp, fltr); |
12316 | bnxt_del_l2_filter(bp, fltr); |
12317 | } |
12318 | |
12319 | vnic->uc_filter_count = 1; |
12320 | |
12321 | netif_addr_lock_bh(dev); |
12322 | if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { |
12323 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
12324 | } else { |
12325 | netdev_for_each_uc_addr(ha, dev) { |
12326 | memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); |
12327 | off += ETH_ALEN; |
12328 | vnic->uc_filter_count++; |
12329 | } |
12330 | } |
12331 | netif_addr_unlock_bh(dev); |
12332 | |
12333 | for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { |
12334 | rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); |
12335 | if (rc) { |
12336 | if (BNXT_VF(bp) && rc == -ENODEV) { |
12337 | if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) |
12338 | netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); |
12339 | else |
12340 | netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); |
12341 | rc = 0; |
12342 | } else { |
12343 | netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); |
12344 | } |
12345 | vnic->uc_filter_count = i; |
12346 | return rc; |
12347 | } |
12348 | } |
12349 | if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) |
12350 | netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); |
12351 | |
12352 | skip_uc: |
12353 | if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && |
12354 | !bnxt_promisc_ok(bp)) |
12355 | vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
12356 | rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); |
12357 | if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { |
12358 | netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", |
12359 | rc); |
12360 | vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; |
12361 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
12362 | vnic->mc_list_count = 0; |
12363 | rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); |
12364 | } |
12365 | if (rc) |
12366 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", |
12367 | rc); |
12368 | |
12369 | return rc; |
12370 | } |
12371 | |
12372 | static bool bnxt_can_reserve_rings(struct bnxt *bp) |
12373 | { |
12374 | #ifdef CONFIG_BNXT_SRIOV |
12375 | if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { |
12376 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
12377 | |
12378 | /* No minimum rings were provisioned by the PF. Don't |
12379 | * reserve rings by default when device is down. |
12380 | */ |
12381 | if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) |
12382 | return true; |
12383 | |
12384 | if (!netif_running(bp->dev)) |
12385 | return false; |
12386 | } |
12387 | #endif |
12388 | return true; |
12389 | } |
12390 | |
12391 | /* If the chip and firmware support RFS */ |
12392 | static bool bnxt_rfs_supported(struct bnxt *bp) |
12393 | { |
12394 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
12395 | if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) |
12396 | return true; |
12397 | return false; |
12398 | } |
12399 | /* 212 firmware is broken for aRFS */ |
12400 | if (BNXT_FW_MAJ(bp) == 212) |
12401 | return false; |
12402 | if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) |
12403 | return true; |
12404 | if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) |
12405 | return true; |
12406 | return false; |
12407 | } |
12408 | |
12409 | /* If runtime conditions support RFS */ |
12410 | static bool bnxt_rfs_capable(struct bnxt *bp) |
12411 | { |
12412 | struct bnxt_hw_rings hwr = {0}; |
12413 | int max_vnics, max_rss_ctxs; |
12414 | |
12415 | hwr.rss_ctx = 1; |
12416 | if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { |
12417 | /* 2 VNICS: default + Ntuple */ |
12418 | hwr.vnic = 2; |
12419 | hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * |
12420 | hwr.vnic; |
12421 | goto check_reserve_vnic; |
12422 | } |
12423 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
12424 | return bnxt_rfs_supported(bp); |
12425 | if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) |
12426 | return false; |
12427 | |
12428 | hwr.vnic = 1 + bp->rx_nr_rings; |
12429 | check_reserve_vnic: |
12430 | max_vnics = bnxt_get_max_func_vnics(bp); |
12431 | max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); |
12432 | |
12433 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && |
12434 | !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) |
12435 | hwr.rss_ctx = hwr.vnic; |
12436 | |
12437 | if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { |
12438 | if (bp->rx_nr_rings > 1) |
12439 | netdev_warn(bp->dev, |
12440 | "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", |
12441 | min(max_rss_ctxs - 1, max_vnics - 1)); |
12442 | return false; |
12443 | } |
12444 | |
12445 | if (!BNXT_NEW_RM(bp)) |
12446 | return true; |
12447 | |
12448 | if (hwr.vnic == bp->hw_resc.resv_vnics && |
12449 | hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) |
12450 | return true; |
12451 | |
12452 | bnxt_hwrm_reserve_rings(bp, &hwr); |
12453 | if (hwr.vnic <= bp->hw_resc.resv_vnics && |
12454 | hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) |
12455 | return true; |
12456 | |
12457 | netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); |
12458 | hwr.vnic = 1; |
12459 | hwr.rss_ctx = 0; |
12460 | bnxt_hwrm_reserve_rings(bp, &hwr); |
12461 | return false; |
12462 | } |
12463 | |
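/* Enforce feature dependencies: NTUPLE requires RFS resources,
 * aggregation offloads (LRO/GRO_HW) are unavailable with XDP or when
 * aggregation rings are disabled, GRO_HW requires GRO and excludes
 * LRO, and CTAG and STAG RX VLAN acceleration must be toggled
 * together.
 */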
12464 | static netdev_features_t bnxt_fix_features(struct net_device *dev, |
12465 | netdev_features_t features) |
12466 | { |
12467 | struct bnxt *bp = netdev_priv(dev); |
12468 | netdev_features_t vlan_features; |
12469 | |
12470 | if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) |
12471 | features &= ~NETIF_F_NTUPLE; |
12472 | |
12473 | if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) |
12474 | features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
12475 | |
12476 | if (!(features & NETIF_F_GRO)) |
12477 | features &= ~NETIF_F_GRO_HW; |
12478 | |
12479 | if (features & NETIF_F_GRO_HW) |
12480 | features &= ~NETIF_F_LRO; |
12481 | |
12482 | /* Both CTAG and STAG VLAN acceleration on the RX side have to be |
12483 | * turned on or off together. |
12484 | */ |
12485 | vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; |
12486 | if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { |
12487 | if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
12488 | features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
12489 | else if (vlan_features) |
12490 | features |= BNXT_HW_FEATURE_VLAN_ALL_RX; |
12491 | } |
12492 | #ifdef CONFIG_BNXT_SRIOV |
12493 | if (BNXT_VF(bp) && bp->vf.vlan) |
12494 | features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
12495 | #endif |
12496 | return features; |
12497 | } |
12498 | |
12499 | static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, |
12500 | bool link_re_init, u32 flags, bool update_tpa) |
12501 | { |
12502 | bnxt_close_nic(bp, irq_re_init, link_re_init); |
12503 | bp->flags = flags; |
12504 | if (update_tpa) |
12505 | bnxt_set_ring_params(bp); |
12506 | return bnxt_open_nic(bp, irq_re_init, link_re_init); |
12507 | } |
12508 | |
12509 | static int bnxt_set_features(struct net_device *dev, netdev_features_t features) |
12510 | { |
12511 | bool update_tpa = false, update_ntuple = false; |
12512 | struct bnxt *bp = netdev_priv(dev); |
12513 | u32 flags = bp->flags; |
12514 | u32 changes; |
12515 | int rc = 0; |
12516 | bool re_init = false; |
12517 | |
12518 | flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; |
12519 | if (features & NETIF_F_GRO_HW) |
12520 | flags |= BNXT_FLAG_GRO; |
12521 | else if (features & NETIF_F_LRO) |
12522 | flags |= BNXT_FLAG_LRO; |
12523 | |
12524 | if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) |
12525 | flags &= ~BNXT_FLAG_TPA; |
12526 | |
12527 | if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
12528 | flags |= BNXT_FLAG_STRIP_VLAN; |
12529 | |
12530 | if (features & NETIF_F_NTUPLE) |
12531 | flags |= BNXT_FLAG_RFS; |
12532 | else |
12533 | bnxt_clear_usr_fltrs(bp, true); |
12534 | |
12535 | changes = flags ^ bp->flags; |
12536 | if (changes & BNXT_FLAG_TPA) { |
12537 | update_tpa = true; |
12538 | if ((bp->flags & BNXT_FLAG_TPA) == 0 || |
12539 | (flags & BNXT_FLAG_TPA) == 0 || |
12540 | (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
12541 | re_init = true; |
12542 | } |
12543 | |
12544 | if (changes & ~BNXT_FLAG_TPA) |
12545 | re_init = true; |
12546 | |
12547 | if (changes & BNXT_FLAG_RFS) |
12548 | update_ntuple = true; |
12549 | |
12550 | if (flags != bp->flags) { |
12551 | u32 old_flags = bp->flags; |
12552 | |
12553 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
12554 | bp->flags = flags; |
12555 | if (update_tpa) |
12556 | bnxt_set_ring_params(bp); |
12557 | return rc; |
12558 | } |
12559 | |
12560 | if (update_ntuple) |
12561 | return bnxt_reinit_features(bp, true, false, flags, update_tpa); |
12562 | |
12563 | if (re_init) |
12564 | return bnxt_reinit_features(bp, false, false, flags, update_tpa); |
12565 | |
12566 | if (update_tpa) { |
12567 | bp->flags = flags; |
12568 | rc = bnxt_set_tpa(bp, |
12569 | (flags & BNXT_FLAG_TPA) ? |
12570 | true : false); |
12571 | if (rc) |
12572 | bp->flags = old_flags; |
12573 | } |
12574 | } |
12575 | return rc; |
12576 | } |
12577 | |
12578 | static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, |
12579 | u8 **nextp) |
12580 | { |
12581 | struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); |
12582 | struct hop_jumbo_hdr *jhdr; |
12583 | int hdr_count = 0; |
12584 | u8 *nexthdr; |
12585 | int start; |
12586 | |
12587 | /* Check that there are at most 2 IPv6 extension headers, no |
12588 | * fragment header, and each is <= 64 bytes. |
12589 | */ |
12590 | start = nw_off + sizeof(*ip6h); |
12591 | nexthdr = &ip6h->nexthdr; |
12592 | while (ipv6_ext_hdr(*nexthdr)) { |
12593 | struct ipv6_opt_hdr *hp; |
12594 | int hdrlen; |
12595 | |
12596 | if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || |
12597 | *nexthdr == NEXTHDR_FRAGMENT) |
12598 | return false; |
12599 | hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, |
12600 | skb_headlen(skb), NULL); |
12601 | if (!hp) |
12602 | return false; |
12603 | if (*nexthdr == NEXTHDR_AUTH) |
12604 | hdrlen = ipv6_authlen(hp); |
12605 | else |
12606 | hdrlen = ipv6_optlen(hp); |
12607 | |
12608 | if (hdrlen > 64) |
12609 | return false; |
12610 | |
12611 | /* The ext header may be a hop-by-hop header inserted for |
12612 | * big TCP purposes. This will be removed before sending |
12613 | * from NIC, so do not count it. |
12614 | */ |
12615 | if (*nexthdr == NEXTHDR_HOP) { |
12616 | if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) |
12617 | goto increment_hdr; |
12618 | |
12619 | jhdr = (struct hop_jumbo_hdr *)hp; |
12620 | if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || |
12621 | jhdr->nexthdr != IPPROTO_TCP) |
12622 | goto increment_hdr; |
12623 | |
12624 | goto next_hdr; |
12625 | } |
12626 | increment_hdr: |
12627 | hdr_count++; |
12628 | next_hdr: |
12629 | nexthdr = &hp->nexthdr; |
12630 | start += hdrlen; |
12631 | } |
12632 | if (nextp) { |
12633 | /* Caller will check inner protocol */ |
12634 | if (skb->encapsulation) { |
12635 | *nextp = nexthdr; |
12636 | return true; |
12637 | } |
12638 | *nextp = NULL; |
12639 | } |
12640 | /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ |
12641 | return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; |
12642 | } |
12643 | |
12644 | /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ |
12645 | static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) |
12646 | { |
12647 | struct udphdr *uh = udp_hdr(skb); |
12648 | __be16 udp_port = uh->dest; |
12649 | |
12650 | if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && |
12651 | udp_port != bp->vxlan_gpe_port) |
12652 | return false; |
12653 | if (skb->inner_protocol == htons(ETH_P_TEB)) { |
12654 | struct ethhdr *eh = inner_eth_hdr(skb); |
12655 | |
12656 | switch (eh->h_proto) { |
12657 | case htons(ETH_P_IP): |
12658 | return true; |
12659 | case htons(ETH_P_IPV6): |
12660 | return bnxt_exthdr_check(bp, skb, |
12661 | skb_inner_network_offset(skb), |
12662 | NULL); |
12663 | } |
12664 | } else if (skb->inner_protocol == htons(ETH_P_IP)) { |
12665 | return true; |
12666 | } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { |
12667 | return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), |
12668 | NULL); |
12669 | } |
12670 | return false; |
12671 | } |
12672 | |
12673 | static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) |
12674 | { |
12675 | switch (l4_proto) { |
12676 | case IPPROTO_UDP: |
12677 | return bnxt_udp_tunl_check(bp, skb); |
12678 | case IPPROTO_IPIP: |
12679 | return true; |
12680 | case IPPROTO_GRE: { |
12681 | switch (skb->inner_protocol) { |
12682 | default: |
12683 | return false; |
12684 | case htons(ETH_P_IP): |
12685 | return true; |
12686 | case htons(ETH_P_IPV6): |
12687 | fallthrough; |
12688 | } |
12689 | } |
12690 | case IPPROTO_IPV6: |
12691 | /* Check ext headers of inner ipv6 */ |
12692 | return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), |
12693 | NULL); |
12694 | } |
12695 | return false; |
12696 | } |
12697 | |
12698 | static netdev_features_t bnxt_features_check(struct sk_buff *skb, |
12699 | struct net_device *dev, |
12700 | netdev_features_t features) |
12701 | { |
12702 | struct bnxt *bp = netdev_priv(dev); |
12703 | u8 *l4_proto; |
12704 | |
12705 | features = vlan_features_check(skb, features); |
12706 | switch (vlan_get_protocol(skb)) { |
12707 | case htons(ETH_P_IP): |
12708 | if (!skb->encapsulation) |
12709 | return features; |
12710 | l4_proto = &ip_hdr(skb)->protocol; |
12711 | if (bnxt_tunl_check(bp, skb, *l4_proto)) |
12712 | return features; |
12713 | break; |
12714 | case htons(ETH_P_IPV6): |
12715 | if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), |
12716 | &l4_proto)) |
12717 | break; |
12718 | if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) |
12719 | return features; |
12720 | break; |
12721 | } |
12722 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
12723 | } |
12724 | |
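/* Read @num_words 32-bit words of register space starting at @reg_off
 * using the firmware's DBG_READ_DIRECT command.  The firmware DMAs the
 * register contents into a host buffer, which is then converted to CPU
 * byte order in @reg_buf.  A sketch of a call, with made-up values,
 * reading 4 words at offset 0x1000:
 *
 *	u32 buf[4];
 *	int rc = bnxt_dbg_hwrm_rd_reg(bp, 0x1000, 4, buf);
 */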
12725 | int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, |
12726 | u32 *reg_buf) |
12727 | { |
12728 | struct hwrm_dbg_read_direct_output *resp; |
12729 | struct hwrm_dbg_read_direct_input *req; |
12730 | __le32 *dbg_reg_buf; |
12731 | dma_addr_t mapping; |
12732 | int rc, i; |
12733 | |
12734 | rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); |
12735 | if (rc) |
12736 | return rc; |
12737 | |
12738 | dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, |
12739 | &mapping); |
12740 | if (!dbg_reg_buf) { |
12741 | rc = -ENOMEM; |
12742 | goto dbg_rd_reg_exit; |
12743 | } |
12744 | |
12745 | req->host_dest_addr = cpu_to_le64(mapping); |
12746 | |
12747 | resp = hwrm_req_hold(bp, req); |
12748 | req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); |
12749 | req->read_len32 = cpu_to_le32(num_words); |
12750 | |
12751 | rc = hwrm_req_send(bp, req); |
12752 | if (rc || resp->error_code) { |
12753 | rc = -EIO; |
12754 | goto dbg_rd_reg_exit; |
12755 | } |
12756 | for (i = 0; i < num_words; i++) |
12757 | reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); |
12758 | |
12759 | dbg_rd_reg_exit: |
12760 | hwrm_req_drop(bp, req); |
12761 | return rc; |
12762 | } |
12763 | |
12764 | static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, |
12765 | u32 ring_id, u32 *prod, u32 *cons) |
12766 | { |
12767 | struct hwrm_dbg_ring_info_get_output *resp; |
12768 | struct hwrm_dbg_ring_info_get_input *req; |
12769 | int rc; |
12770 | |
12771 | rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); |
12772 | if (rc) |
12773 | return rc; |
12774 | |
12775 | req->ring_type = ring_type; |
12776 | req->fw_ring_id = cpu_to_le32(ring_id); |
12777 | resp = hwrm_req_hold(bp, req); |
12778 | rc = hwrm_req_send(bp, req); |
12779 | if (!rc) { |
12780 | *prod = le32_to_cpu(resp->producer_index); |
12781 | *cons = le32_to_cpu(resp->consumer_index); |
12782 | } |
12783 | hwrm_req_drop(bp, req); |
12784 | return rc; |
12785 | } |
12786 | |
12787 | static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) |
12788 | { |
12789 | struct bnxt_tx_ring_info *txr; |
12790 | int i = bnapi->index, j; |
12791 | |
12792 | bnxt_for_each_napi_tx(j, bnapi, txr) |
12793 | netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", |
12794 | i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, |
12795 | txr->tx_cons); |
12796 | } |
12797 | |
12798 | static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) |
12799 | { |
12800 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
12801 | int i = bnapi->index; |
12802 | |
12803 | if (!rxr) |
12804 | return; |
12805 | |
12806 | netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", |
12807 | i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, |
12808 | rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, |
12809 | rxr->rx_sw_agg_prod); |
12810 | } |
12811 | |
12812 | static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) |
12813 | { |
12814 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
12815 | int i = bnapi->index; |
12816 | |
12817 | netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", |
12818 | i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); |
12819 | } |
12820 | |
12821 | static void bnxt_dbg_dump_states(struct bnxt *bp) |
12822 | { |
12823 | int i; |
12824 | struct bnxt_napi *bnapi; |
12825 | |
12826 | for (i = 0; i < bp->cp_nr_rings; i++) { |
12827 | bnapi = bp->bnapi[i]; |
12828 | if (netif_msg_drv(bp)) { |
12829 | bnxt_dump_tx_sw_state(bnapi); |
12830 | bnxt_dump_rx_sw_state(bnapi); |
12831 | bnxt_dump_cp_sw_state(bnapi); |
12832 | } |
12833 | } |
12834 | } |
12835 | |
12836 | static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) |
12837 | { |
12838 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
12839 | struct hwrm_ring_reset_input *req; |
12840 | struct bnxt_napi *bnapi = rxr->bnapi; |
12841 | struct bnxt_cp_ring_info *cpr; |
12842 | u16 cp_ring_id; |
12843 | int rc; |
12844 | |
12845 | rc = hwrm_req_init(bp, req, HWRM_RING_RESET); |
12846 | if (rc) |
12847 | return rc; |
12848 | |
12849 | cpr = &bnapi->cp_ring; |
12850 | cp_ring_id = cpr->cp_ring_struct.fw_ring_id; |
12851 | req->cmpl_ring = cpu_to_le16(cp_ring_id); |
12852 | req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; |
12853 | req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); |
12854 | return hwrm_req_send_silent(bp, req); |
12855 | } |
12856 | |
12857 | static void bnxt_reset_task(struct bnxt *bp, bool silent) |
12858 | { |
12859 | if (!silent) |
12860 | bnxt_dbg_dump_states(bp); |
12861 | if (netif_running(bp->dev)) { |
12862 | int rc; |
12863 | |
12864 | if (silent) { |
12865 | bnxt_close_nic(bp, false, false); |
12866 | bnxt_open_nic(bp, false, false); |
12867 | } else { |
12868 | bnxt_ulp_stop(bp); |
12869 | bnxt_close_nic(bp, true, false); |
12870 | rc = bnxt_open_nic(bp, true, false); |
12871 | bnxt_ulp_start(bp, rc); |
12872 | } |
12873 | } |
12874 | } |
12875 | |
12876 | static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) |
12877 | { |
12878 | struct bnxt *bp = netdev_priv(dev); |
12879 | |
12880 | netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); |
12881 | bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); |
12882 | } |
12883 | |
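/* Periodic firmware health check driven by bnxt_timer().  A countdown
 * (tmr_counter) limits how often the registers are sampled.  A stalled
 * heartbeat counter or a changed reset counter both queue the firmware
 * exception handler.
 */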
12884 | static void bnxt_fw_health_check(struct bnxt *bp) |
12885 | { |
12886 | struct bnxt_fw_health *fw_health = bp->fw_health; |
12887 | struct pci_dev *pdev = bp->pdev; |
12888 | u32 val; |
12889 | |
12890 | if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
12891 | return; |
12892 | |
12893 | /* Make sure it is enabled before checking the tmr_counter. */ |
12894 | smp_rmb(); |
12895 | if (fw_health->tmr_counter) { |
12896 | fw_health->tmr_counter--; |
12897 | return; |
12898 | } |
12899 | |
12900 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
12901 | if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { |
12902 | fw_health->arrests++; |
12903 | goto fw_reset; |
12904 | } |
12905 | |
12906 | fw_health->last_fw_heartbeat = val; |
12907 | |
12908 | val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
12909 | if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { |
12910 | fw_health->discoveries++; |
12911 | goto fw_reset; |
12912 | } |
12913 | |
12914 | fw_health->tmr_counter = fw_health->tmr_multiplier; |
12915 | return; |
12916 | |
12917 | fw_reset: |
12918 | bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); |
12919 | } |
12920 | |
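/* Per-device housekeeping timer.  Runs the firmware health check and
 * queues deferred work for periodic stats, TC flow stats, aRFS filter
 * maintenance, PHY setting retries, L2 filter retries and ring
 * coalescing checks as needed, then re-arms itself while the device
 * stays open.
 */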
12921 | static void bnxt_timer(struct timer_list *t) |
12922 | { |
12923 | struct bnxt *bp = from_timer(bp, t, timer); |
12924 | struct net_device *dev = bp->dev; |
12925 | |
12926 | if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) |
12927 | return; |
12928 | |
12929 | if (atomic_read(&bp->intr_sem) != 0) |
12930 | goto bnxt_restart_timer; |
12931 | |
12932 | if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) |
12933 | bnxt_fw_health_check(bp); |
12934 | |
12935 | if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) |
12936 | bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); |
12937 | |
12938 | if (bnxt_tc_flower_enabled(bp)) |
12939 | bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); |
12940 | |
12941 | #ifdef CONFIG_RFS_ACCEL |
12942 | if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) |
12943 | bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); |
12944 | #endif /*CONFIG_RFS_ACCEL*/ |
12945 | |
12946 | if (bp->link_info.phy_retry) { |
12947 | if (time_after(jiffies, bp->link_info.phy_retry_expires)) { |
12948 | bp->link_info.phy_retry = false; |
12949 | netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); |
12950 | } else { |
12951 | bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); |
12952 | } |
12953 | } |
12954 | |
12955 | if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) |
12956 | bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); |
12957 | |
12958 | if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) |
12959 | bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); |
12960 | |
12961 | bnxt_restart_timer: |
	mod_timer(&bp->timer, jiffies + bp->current_interval);
12963 | } |
12964 | |
12965 | static void bnxt_rtnl_lock_sp(struct bnxt *bp) |
12966 | { |
12967 | /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK |
12968 | * set. If the device is being closed, bnxt_close() may be holding |
12969 | * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we |
12970 | * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). |
12971 | */ |
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12973 | rtnl_lock(); |
12974 | } |
12975 | |
12976 | static void bnxt_rtnl_unlock_sp(struct bnxt *bp) |
12977 | { |
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12979 | rtnl_unlock(); |
12980 | } |
12981 | |
12982 | /* Only called from bnxt_sp_task() */ |
12983 | static void bnxt_reset(struct bnxt *bp, bool silent) |
12984 | { |
12985 | bnxt_rtnl_lock_sp(bp); |
12986 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) |
12987 | bnxt_reset_task(bp, silent); |
12988 | bnxt_rtnl_unlock_sp(bp); |
12989 | } |
12990 | |
12991 | /* Only called from bnxt_sp_task() */ |
12992 | static void bnxt_rx_ring_reset(struct bnxt *bp) |
12993 | { |
12994 | int i; |
12995 | |
12996 | bnxt_rtnl_lock_sp(bp); |
12997 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
12998 | bnxt_rtnl_unlock_sp(bp); |
12999 | return; |
13000 | } |
13001 | /* Disable and flush TPA before resetting the RX ring */ |
13002 | if (bp->flags & BNXT_FLAG_TPA) |
		bnxt_set_tpa(bp, false);
13004 | for (i = 0; i < bp->rx_nr_rings; i++) { |
13005 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
13006 | struct bnxt_cp_ring_info *cpr; |
13007 | int rc; |
13008 | |
13009 | if (!rxr->bnapi->in_reset) |
13010 | continue; |
13011 | |
		rc = bnxt_hwrm_rx_ring_reset(bp, i);
		if (rc) {
			if (rc == -EINVAL || rc == -EOPNOTSUPP)
				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
			else
				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
					    rc);
			bnxt_reset_task(bp, true);
13020 | break; |
13021 | } |
		bnxt_free_one_rx_ring_skbs(bp, i);
13023 | rxr->rx_prod = 0; |
13024 | rxr->rx_agg_prod = 0; |
13025 | rxr->rx_sw_agg_prod = 0; |
13026 | rxr->rx_next_cons = 0; |
13027 | rxr->bnapi->in_reset = false; |
		bnxt_alloc_one_rx_ring(bp, i);
13029 | cpr = &rxr->bnapi->cp_ring; |
13030 | cpr->sw_stats.rx.rx_resets++; |
13031 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
13034 | } |
13035 | if (bp->flags & BNXT_FLAG_TPA) |
		bnxt_set_tpa(bp, true);
13037 | bnxt_rtnl_unlock_sp(bp); |
13038 | } |
13039 | |
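/* Quiesce and close the device in preparation for a firmware reset. */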
13040 | static void bnxt_fw_reset_close(struct bnxt *bp) |
13041 | { |
13042 | bnxt_ulp_stop(bp); |
13043 | /* When firmware is in fatal state, quiesce device and disable |
13044 | * bus master to prevent any potential bad DMAs before freeing |
13045 | * kernel memory. |
13046 | */ |
13047 | if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { |
13048 | u16 val = 0; |
13049 | |
		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13051 | if (val == 0xffff) |
13052 | bp->fw_reset_min_dsecs = 0; |
13053 | bnxt_tx_disable(bp); |
13054 | bnxt_disable_napi(bp); |
13055 | bnxt_disable_int_sync(bp); |
13056 | bnxt_free_irq(bp); |
13057 | bnxt_clear_int_mode(bp); |
		pci_disable_device(bp->pdev);
13059 | } |
	__bnxt_close_nic(bp, true, false);
13061 | bnxt_vf_reps_free(bp); |
13062 | bnxt_clear_int_mode(bp); |
13063 | bnxt_hwrm_func_drv_unrgtr(bp); |
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
13066 | bnxt_free_ctx_mem(bp); |
13067 | } |
13068 | |
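/* After a firmware exception, report the firmware as OK only if its
 * heartbeat is still ticking and the reset counter shows it has already
 * gone through a self-reset.
 */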
13069 | static bool is_bnxt_fw_ok(struct bnxt *bp) |
13070 | { |
13071 | struct bnxt_fw_health *fw_health = bp->fw_health; |
13072 | bool no_heartbeat = false, has_reset = false; |
13073 | u32 val; |
13074 | |
13075 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
13076 | if (val == fw_health->last_fw_heartbeat) |
13077 | no_heartbeat = true; |
13078 | |
13079 | val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
13080 | if (val != fw_health->last_fw_reset_cnt) |
13081 | has_reset = true; |
13082 | |
13083 | if (!no_heartbeat && has_reset) |
13084 | return true; |
13085 | |
13086 | return false; |
13087 | } |
13088 | |
13089 | /* rtnl_lock is acquired before calling this function */ |
13090 | static void bnxt_force_fw_reset(struct bnxt *bp) |
13091 | { |
13092 | struct bnxt_fw_health *fw_health = bp->fw_health; |
13093 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
13094 | u32 wait_dsecs; |
13095 | |
13096 | if (!test_bit(BNXT_STATE_OPEN, &bp->state) || |
13097 | test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
13098 | return; |
13099 | |
13100 | if (ptp) { |
		spin_lock_bh(&ptp->ptp_lock);
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		spin_unlock_bh(&ptp->ptp_lock);
	} else {
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13106 | } |
13107 | bnxt_fw_reset_close(bp); |
13108 | wait_dsecs = fw_health->master_func_wait_dsecs; |
13109 | if (fw_health->primary) { |
13110 | if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) |
13111 | wait_dsecs = 0; |
13112 | bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
13113 | } else { |
13114 | bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; |
13115 | wait_dsecs = fw_health->normal_func_wait_dsecs; |
13116 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
13117 | } |
13118 | |
13119 | bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; |
13120 | bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; |
	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13122 | } |
13123 | |
13124 | void bnxt_fw_exception(struct bnxt *bp) |
13125 | { |
13126 | netdev_warn(dev: bp->dev, format: "Detected firmware fatal condition, initiating reset\n" ); |
13127 | set_bit(BNXT_STATE_FW_FATAL_COND, addr: &bp->state); |
13128 | bnxt_rtnl_lock_sp(bp); |
13129 | bnxt_force_fw_reset(bp); |
13130 | bnxt_rtnl_unlock_sp(bp); |
13131 | } |
13132 | |
13133 | /* Returns the number of registered VFs, or 1 if VF configuration is pending, or |
13134 | * < 0 on error. |
13135 | */ |
13136 | static int bnxt_get_registered_vfs(struct bnxt *bp) |
13137 | { |
13138 | #ifdef CONFIG_BNXT_SRIOV |
13139 | int rc; |
13140 | |
13141 | if (!BNXT_PF(bp)) |
13142 | return 0; |
13143 | |
13144 | rc = bnxt_hwrm_func_qcfg(bp); |
13145 | if (rc) { |
13146 | netdev_err(dev: bp->dev, format: "func_qcfg cmd failed, rc = %d\n" , rc); |
13147 | return rc; |
13148 | } |
13149 | if (bp->pf.registered_vfs) |
13150 | return bp->pf.registered_vfs; |
13151 | if (bp->sriov_cfg) |
13152 | return 1; |
13153 | #endif |
13154 | return 0; |
13155 | } |
13156 | |
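/* Initiate a non-fatal firmware reset.  If VFs are still registered,
 * poll for them to unregister (BNXT_FW_RESET_STATE_POLL_VF) before
 * closing the device and handing off to bnxt_fw_reset_task().
 */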
13157 | void bnxt_fw_reset(struct bnxt *bp) |
13158 | { |
13159 | bnxt_rtnl_lock_sp(bp); |
13160 | if (test_bit(BNXT_STATE_OPEN, &bp->state) && |
13161 | !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
13162 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
13163 | int n = 0, tmo; |
13164 | |
13165 | if (ptp) { |
			spin_lock_bh(&ptp->ptp_lock);
			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
			spin_unlock_bh(&ptp->ptp_lock);
		} else {
			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13171 | } |
13172 | if (bp->pf.active_vfs && |
13173 | !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
13174 | n = bnxt_get_registered_vfs(bp); |
13175 | if (n < 0) { |
13176 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, rc = %d\n" , |
13177 | n); |
13178 | clear_bit(BNXT_STATE_IN_FW_RESET, addr: &bp->state); |
13179 | dev_close(dev: bp->dev); |
13180 | goto fw_reset_exit; |
13181 | } else if (n > 0) { |
13182 | u16 vf_tmo_dsecs = n * 10; |
13183 | |
13184 | if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) |
13185 | bp->fw_reset_max_dsecs = vf_tmo_dsecs; |
13186 | bp->fw_reset_state = |
13187 | BNXT_FW_RESET_STATE_POLL_VF; |
13188 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
13189 | goto fw_reset_exit; |
13190 | } |
13191 | bnxt_fw_reset_close(bp); |
13192 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
13193 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
13194 | tmo = HZ / 10; |
13195 | } else { |
13196 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
13197 | tmo = bp->fw_reset_min_dsecs * HZ / 10; |
13198 | } |
		bnxt_queue_fw_reset_work(bp, tmo);
13200 | } |
13201 | fw_reset_exit: |
13202 | bnxt_rtnl_unlock_sp(bp); |
13203 | } |
13204 | |
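/* P5+ chips only: look for completion rings that have work pending but
 * whose raw consumer index has not moved since the last check, i.e. a
 * likely missed interrupt.  Ring info is read back via HWRM for debug
 * and the missed_irqs counter is bumped.
 */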
13205 | static void bnxt_chk_missed_irq(struct bnxt *bp) |
13206 | { |
13207 | int i; |
13208 | |
13209 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
13210 | return; |
13211 | |
13212 | for (i = 0; i < bp->cp_nr_rings; i++) { |
13213 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
13214 | struct bnxt_cp_ring_info *cpr; |
13215 | u32 fw_ring_id; |
13216 | int j; |
13217 | |
13218 | if (!bnapi) |
13219 | continue; |
13220 | |
13221 | cpr = &bnapi->cp_ring; |
13222 | for (j = 0; j < cpr->cp_ring_count; j++) { |
13223 | struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; |
13224 | u32 val[2]; |
13225 | |
			if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
13227 | continue; |
13228 | |
13229 | if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { |
13230 | cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; |
13231 | continue; |
13232 | } |
13233 | fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; |
			bnxt_dbg_hwrm_ring_info_get(bp,
				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
				fw_ring_id, &val[0], &val[1]);
13237 | cpr->sw_stats.cmn.missed_irqs++; |
13238 | } |
13239 | } |
13240 | } |
13241 | |
13242 | static void bnxt_cfg_ntp_filters(struct bnxt *); |
13243 | |
13244 | static void bnxt_init_ethtool_link_settings(struct bnxt *bp) |
13245 | { |
13246 | struct bnxt_link_info *link_info = &bp->link_info; |
13247 | |
13248 | if (BNXT_AUTO_MODE(link_info->auto_mode)) { |
13249 | link_info->autoneg = BNXT_AUTONEG_SPEED; |
13250 | if (bp->hwrm_spec_code >= 0x10201) { |
13251 | if (link_info->auto_pause_setting & |
13252 | PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) |
13253 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
13254 | } else { |
13255 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
13256 | } |
13257 | bnxt_set_auto_speed(link_info); |
13258 | } else { |
13259 | bnxt_set_force_speed(link_info); |
13260 | link_info->req_duplex = link_info->duplex_setting; |
13261 | } |
13262 | if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) |
13263 | link_info->req_flow_ctrl = |
13264 | link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; |
13265 | else |
13266 | link_info->req_flow_ctrl = link_info->force_pause_setting; |
13267 | } |
13268 | |
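/* Respond to a firmware echo request by mirroring back the event data. */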
13269 | static void bnxt_fw_echo_reply(struct bnxt *bp) |
13270 | { |
13271 | struct bnxt_fw_health *fw_health = bp->fw_health; |
13272 | struct hwrm_func_echo_response_input *req; |
13273 | int rc; |
13274 | |
13275 | rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); |
13276 | if (rc) |
13277 | return; |
13278 | req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); |
13279 | req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); |
13280 | hwrm_req_send(bp, req); |
13281 | } |
13282 | |
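/* Workqueue handler for all deferred slow-path events queued from the
 * timer and interrupt paths.  BNXT_STATE_IN_SP_TASK is set for the
 * duration so that bnxt_close() can synchronize against it.
 */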
13283 | static void bnxt_sp_task(struct work_struct *work) |
13284 | { |
13285 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); |
13286 | |
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13291 | return; |
13292 | } |
13293 | |
	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
		bnxt_cfg_ntp_filters(bp);
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!\n");
	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_port_qstats(bp, 0);
		bnxt_hwrm_port_qstats_ext(bp, 0);
13306 | bnxt_accumulate_all_stats(bp); |
13307 | } |
13308 | |
	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
		int rc;

		mutex_lock(&bp->link_lock);
		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
				       &bp->sp_event))
			bnxt_hwrm_phy_qcaps(bp);

		rc = bnxt_update_link(bp, true);
		if (rc)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);

		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
				       &bp->sp_event))
			bnxt_init_ethtool_link_settings(bp);
		mutex_unlock(&bp->link_lock);
13326 | } |
	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
		int rc;

		mutex_lock(&bp->link_lock);
		rc = bnxt_update_phy_setting(bp);
		mutex_unlock(&bp->link_lock);
		if (rc) {
			netdev_warn(bp->dev, "update phy settings retry failed\n");
		} else {
			bp->link_info.phy_retry = false;
			netdev_info(bp->dev, "update phy settings retry succeeded\n");
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
		mutex_lock(&bp->link_lock);
		bnxt_get_port_module_status(bp);
		mutex_unlock(&bp->link_lock);
13344 | } |
13345 | |
	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
		bnxt_tc_flow_stats_work(bp);

	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
		bnxt_chk_missed_irq(bp);

	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
		bnxt_fw_echo_reply(bp);

	if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
13356 | bnxt_hwmon_notify_event(bp); |
13357 | |
13358 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They |
13359 | * must be the last functions to be called before exiting. |
13360 | */ |
	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, false);

	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, true);

	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
		bnxt_rx_ring_reset(bp);
13369 | |
	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
13371 | if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || |
13372 | test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) |
13373 | bnxt_devlink_health_fw_report(bp); |
13374 | else |
13375 | bnxt_fw_reset(bp); |
13376 | } |
13377 | |
	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
13379 | if (!is_bnxt_fw_ok(bp)) |
13380 | bnxt_devlink_health_fw_report(bp); |
13381 | } |
13382 | |
13383 | smp_mb__before_atomic(); |
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
13385 | } |
13386 | |
13387 | static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, |
13388 | int *max_cp); |
13389 | |
13390 | /* Under rtnl_lock */ |
13391 | int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, |
13392 | int tx_xdp) |
13393 | { |
13394 | int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; |
13395 | struct bnxt_hw_rings hwr = {0}; |
13396 | int rx_rings = rx; |
13397 | |
13398 | if (tcs) |
13399 | tx_sets = tcs; |
13400 | |
	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
13402 | |
13403 | if (max_rx < rx_rings) |
13404 | return -ENOMEM; |
13405 | |
13406 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
13407 | rx_rings <<= 1; |
13408 | |
13409 | hwr.rx = rx_rings; |
13410 | hwr.tx = tx * tx_sets + tx_xdp; |
13411 | if (max_tx < hwr.tx) |
13412 | return -ENOMEM; |
13413 | |
	hwr.vnic = bnxt_get_total_vnics(bp, rx);

	tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
13417 | hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; |
13418 | if (max_cp < hwr.cp) |
13419 | return -ENOMEM; |
13420 | hwr.stat = hwr.cp; |
13421 | if (BNXT_NEW_RM(bp)) { |
13422 | hwr.cp += bnxt_get_ulp_msix_num(bp); |
13423 | hwr.stat += bnxt_get_ulp_stat_ctxs(bp); |
13424 | hwr.grp = rx; |
		hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
13426 | } |
13427 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) |
13428 | hwr.cp_p5 = hwr.tx + rx; |
13429 | return bnxt_hwrm_check_rings(bp, hwr: &hwr); |
13430 | } |
13431 | |
13432 | static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) |
13433 | { |
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}
13448 | } |
13449 | |
13450 | static void bnxt_cleanup_pci(struct bnxt *bp) |
13451 | { |
	bnxt_unmap_bars(bp, bp->pdev);
	pci_release_regions(bp->pdev);
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
13456 | } |
13457 | |
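/* Set default interrupt coalescing parameters for the RX and TX rings. */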
13458 | static void bnxt_init_dflt_coal(struct bnxt *bp) |
13459 | { |
13460 | struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
13461 | struct bnxt_coal *coal; |
13462 | u16 flags = 0; |
13463 | |
13464 | if (coal_cap->cmpl_params & |
13465 | RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) |
13466 | flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; |
13467 | |
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
13471 | coal = &bp->rx_coal; |
13472 | coal->coal_ticks = 10; |
13473 | coal->coal_bufs = 30; |
13474 | coal->coal_ticks_irq = 1; |
13475 | coal->coal_bufs_irq = 2; |
13476 | coal->idle_thresh = 50; |
13477 | coal->bufs_per_record = 2; |
13478 | coal->budget = 64; /* NAPI budget */ |
13479 | coal->flags = flags; |
13480 | |
13481 | coal = &bp->tx_coal; |
13482 | coal->coal_ticks = 28; |
13483 | coal->coal_bufs = 30; |
13484 | coal->coal_ticks_irq = 2; |
13485 | coal->coal_bufs_irq = 2; |
13486 | coal->bufs_per_record = 1; |
13487 | coal->flags = flags; |
13488 | |
13489 | bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; |
13490 | } |
13491 | |
13492 | /* FW that pre-reserves 1 VNIC per function */ |
13493 | static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) |
13494 | { |
13495 | u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); |
13496 | |
13497 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && |
13498 | (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) |
13499 | return true; |
13500 | if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && |
13501 | (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) |
13502 | return true; |
13503 | return false; |
13504 | } |
13505 | |
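/* Phase 1 of firmware init: establish HWRM communication (retrying
 * through firmware recovery if needed) and reset the function.
 */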
13506 | static int bnxt_fw_init_one_p1(struct bnxt *bp) |
13507 | { |
13508 | int rc; |
13509 | |
13510 | bp->fw_cap = 0; |
13511 | rc = bnxt_hwrm_ver_get(bp); |
13512 | /* FW may be unresponsive after FLR. FLR must complete within 100 msec |
13513 | * so wait before continuing with recovery. |
13514 | */ |
13515 | if (rc) |
		msleep(100);
13517 | bnxt_try_map_fw_health_reg(bp); |
13518 | if (rc) { |
13519 | rc = bnxt_try_recover_fw(bp); |
13520 | if (rc) |
13521 | return rc; |
13522 | rc = bnxt_hwrm_ver_get(bp); |
13523 | if (rc) |
13524 | return rc; |
13525 | } |
13526 | |
13527 | bnxt_nvm_cfg_ver_get(bp); |
13528 | |
13529 | rc = bnxt_hwrm_func_reset(bp); |
13530 | if (rc) |
13531 | return -ENODEV; |
13532 | |
13533 | bnxt_hwrm_fw_set_time(bp); |
13534 | return 0; |
13535 | } |
13536 | |
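/* Phase 2 of firmware init: query capabilities, set up error recovery,
 * and register the driver with firmware.
 */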
13537 | static int bnxt_fw_init_one_p2(struct bnxt *bp) |
13538 | { |
13539 | int rc; |
13540 | |
13541 | /* Get the MAX capabilities for this function */ |
13542 | rc = bnxt_hwrm_func_qcaps(bp); |
13543 | if (rc) { |
13544 | netdev_err(dev: bp->dev, format: "hwrm query capability failure rc: %x\n" , |
13545 | rc); |
13546 | return -ENODEV; |
13547 | } |
13548 | |
13549 | rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); |
13550 | if (rc) |
13551 | netdev_warn(dev: bp->dev, format: "hwrm query adv flow mgnt failure rc: %d\n" , |
13552 | rc); |
13553 | |
	if (bnxt_alloc_fw_health(bp)) {
		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
				    rc);
13561 | } |
13562 | |
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
13564 | if (rc) |
13565 | return -ENODEV; |
13566 | |
13567 | if (bnxt_fw_pre_resv_vnics(bp)) |
13568 | bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; |
13569 | |
13570 | bnxt_hwrm_func_qcfg(bp); |
13571 | bnxt_hwrm_vnic_qcaps(bp); |
13572 | bnxt_hwrm_port_led_qcaps(bp); |
13573 | bnxt_ethtool_init(bp); |
13574 | if (bp->fw_cap & BNXT_FW_CAP_PTP) |
13575 | __bnxt_hwrm_ptp_qcfg(bp); |
13576 | bnxt_dcb_init(bp); |
13577 | bnxt_hwmon_init(bp); |
13578 | return 0; |
13579 | } |
13580 | |
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
13582 | { |
13583 | bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; |
13584 | bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | |
13585 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | |
13586 | VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | |
13587 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; |
13588 | if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) |
13589 | bp->rss_hash_delta = bp->rss_hash_cfg; |
13590 | if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { |
13591 | bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; |
13592 | bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | |
13593 | VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; |
13594 | } |
13595 | } |
13596 | |
13597 | static void bnxt_set_dflt_rfs(struct bnxt *bp) |
13598 | { |
13599 | struct net_device *dev = bp->dev; |
13600 | |
13601 | dev->hw_features &= ~NETIF_F_NTUPLE; |
13602 | dev->features &= ~NETIF_F_NTUPLE; |
13603 | bp->flags &= ~BNXT_FLAG_RFS; |
13604 | if (bnxt_rfs_supported(bp)) { |
13605 | dev->hw_features |= NETIF_F_NTUPLE; |
13606 | if (bnxt_rfs_capable(bp)) { |
13607 | bp->flags |= BNXT_FLAG_RFS; |
13608 | dev->features |= NETIF_F_NTUPLE; |
13609 | } |
13610 | } |
13611 | } |
13612 | |
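/* Phase 3 of firmware init: apply default RSS/RFS settings, Wake-on-LAN,
 * cache line size, and coalescing capabilities.
 */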
13613 | static void bnxt_fw_init_one_p3(struct bnxt *bp) |
13614 | { |
13615 | struct pci_dev *pdev = bp->pdev; |
13616 | |
13617 | bnxt_set_dflt_rss_hash_type(bp); |
13618 | bnxt_set_dflt_rfs(bp); |
13619 | |
13620 | bnxt_get_wol_settings(bp); |
13621 | if (bp->flags & BNXT_FLAG_WOL_CAP) |
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);
13625 | |
13626 | bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); |
13627 | bnxt_hwrm_coal_params_qcaps(bp); |
13628 | } |
13629 | |
13630 | static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); |
13631 | |
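/* Run all three firmware init phases, typically when firmware needs to
 * be fully reinitialized after a firmware reset or recovery.
 */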
13632 | int bnxt_fw_init_one(struct bnxt *bp) |
13633 | { |
13634 | int rc; |
13635 | |
13636 | rc = bnxt_fw_init_one_p1(bp); |
13637 | if (rc) { |
13638 | netdev_err(dev: bp->dev, format: "Firmware init phase 1 failed\n" ); |
13639 | return rc; |
13640 | } |
13641 | rc = bnxt_fw_init_one_p2(bp); |
13642 | if (rc) { |
13643 | netdev_err(dev: bp->dev, format: "Firmware init phase 2 failed\n" ); |
13644 | return rc; |
13645 | } |
	rc = bnxt_probe_phy(bp, false);
13647 | if (rc) |
13648 | return rc; |
13649 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); |
13650 | if (rc) |
13651 | return rc; |
13652 | |
13653 | bnxt_fw_init_one_p3(bp); |
13654 | return 0; |
13655 | } |
13656 | |
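/* Execute one step of the firmware-supplied reset sequence: write a value
 * to a config-space, GRC, or BAR register, then optionally delay.
 */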
13657 | static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) |
13658 | { |
13659 | struct bnxt_fw_health *fw_health = bp->fw_health; |
13660 | u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; |
13661 | u32 val = fw_health->fw_reset_seq_vals[reg_idx]; |
13662 | u32 reg_type, reg_off, delay_msecs; |
13663 | |
13664 | delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; |
13665 | reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
13666 | reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
13667 | switch (reg_type) { |
13668 | case BNXT_FW_HEALTH_REG_TYPE_CFG: |
		pci_write_config_dword(bp->pdev, reg_off, val);
13670 | break; |
13671 | case BNXT_FW_HEALTH_REG_TYPE_GRC: |
		writel(reg_off & BNXT_GRC_BASE_MASK,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
13674 | reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; |
13675 | fallthrough; |
13676 | case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
		writel(val, bp->bar0 + reg_off);
13678 | break; |
13679 | case BNXT_FW_HEALTH_REG_TYPE_BAR1: |
		writel(val, bp->bar1 + reg_off);
13681 | break; |
13682 | } |
13683 | if (delay_msecs) { |
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
13686 | } |
13687 | } |
13688 | |
13689 | bool bnxt_hwrm_reset_permitted(struct bnxt *bp) |
13690 | { |
13691 | struct hwrm_func_qcfg_output *resp; |
13692 | struct hwrm_func_qcfg_input *req; |
13693 | bool result = true; /* firmware will enforce if unknown */ |
13694 | |
13695 | if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) |
13696 | return result; |
13697 | |
13698 | if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) |
13699 | return result; |
13700 | |
13701 | req->fid = cpu_to_le16(0xffff); |
13702 | resp = hwrm_req_hold(bp, req); |
13703 | if (!hwrm_req_send(bp, req)) |
13704 | result = !!(le16_to_cpu(resp->flags) & |
13705 | FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); |
13706 | hwrm_req_drop(bp, req); |
13707 | return result; |
13708 | } |
13709 | |
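/* Reset the firmware using whichever mechanism it advertised: OP-TEE
 * reload, a host register write sequence, or an HWRM_FW_RESET command
 * to the co-processor.
 */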
13710 | static void bnxt_reset_all(struct bnxt *bp) |
13711 | { |
13712 | struct bnxt_fw_health *fw_health = bp->fw_health; |
13713 | int i, rc; |
13714 | |
13715 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
13716 | bnxt_fw_reset_via_optee(bp); |
13717 | bp->fw_reset_timestamp = jiffies; |
13718 | return; |
13719 | } |
13720 | |
13721 | if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { |
13722 | for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) |
			bnxt_fw_reset_writel(bp, i);
13724 | } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { |
13725 | struct hwrm_fw_reset_input *req; |
13726 | |
13727 | rc = hwrm_req_init(bp, req, HWRM_FW_RESET); |
13728 | if (!rc) { |
13729 | req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); |
13730 | req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; |
13731 | req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; |
13732 | req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; |
13733 | rc = hwrm_req_send(bp, req); |
13734 | } |
13735 | if (rc != -ENODEV) |
13736 | netdev_warn(dev: bp->dev, format: "Unable to reset FW rc=%d\n" , rc); |
13737 | } |
13738 | bp->fw_reset_timestamp = jiffies; |
13739 | } |
13740 | |
13741 | static bool bnxt_fw_reset_timeout(struct bnxt *bp) |
13742 | { |
13743 | return time_after(jiffies, bp->fw_reset_timestamp + |
13744 | (bp->fw_reset_max_dsecs * HZ / 10)); |
13745 | } |
13746 | |
13747 | static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) |
13748 | { |
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
		bnxt_ulp_start(bp, rc);
		bnxt_dl_health_fw_status_update(bp, false);
	}
	bp->fw_reset_state = 0;
	dev_close(bp->dev);
13756 | } |
13757 | |
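/* Delayed-work state machine that drives a firmware reset from VF
 * teardown through device re-enable, firmware polling, and reopening
 * the netdev.  Each state either advances via fallthrough or requeues
 * itself.
 */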
13758 | static void bnxt_fw_reset_task(struct work_struct *work) |
13759 | { |
13760 | struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); |
13761 | int rc = 0; |
13762 | |
13763 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
13764 | netdev_err(dev: bp->dev, format: "bnxt_fw_reset_task() called when not in fw reset mode!\n" ); |
13765 | return; |
13766 | } |
13767 | |
13768 | switch (bp->fw_reset_state) { |
13769 | case BNXT_FW_RESET_STATE_POLL_VF: { |
13770 | int n = bnxt_get_registered_vfs(bp); |
13771 | int tmo; |
13772 | |
13773 | if (n < 0) { |
13774 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n" , |
13775 | n, jiffies_to_msecs(j: jiffies - |
13776 | bp->fw_reset_timestamp)); |
13777 | goto fw_reset_abort; |
13778 | } else if (n > 0) { |
13779 | if (bnxt_fw_reset_timeout(bp)) { |
				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
				bp->fw_reset_state = 0;
				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
					   n);
13784 | return; |
13785 | } |
13786 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
13787 | return; |
13788 | } |
13789 | bp->fw_reset_timestamp = jiffies; |
13790 | rtnl_lock(); |
13791 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
13792 | bnxt_fw_reset_abort(bp, rc); |
13793 | rtnl_unlock(); |
13794 | return; |
13795 | } |
13796 | bnxt_fw_reset_close(bp); |
13797 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
13798 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
13799 | tmo = HZ / 10; |
13800 | } else { |
13801 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
13802 | tmo = bp->fw_reset_min_dsecs * HZ / 10; |
13803 | } |
13804 | rtnl_unlock(); |
		bnxt_queue_fw_reset_work(bp, tmo);
13806 | return; |
13807 | } |
13808 | case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { |
13809 | u32 val; |
13810 | |
13811 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
13812 | if (!(val & BNXT_FW_STATUS_SHUTDOWN) && |
13813 | !bnxt_fw_reset_timeout(bp)) { |
13814 | bnxt_queue_fw_reset_work(bp, HZ / 5); |
13815 | return; |
13816 | } |
13817 | |
13818 | if (!bp->fw_health->primary) { |
13819 | u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; |
13820 | |
13821 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
13823 | return; |
13824 | } |
13825 | bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
13826 | } |
13827 | fallthrough; |
13828 | case BNXT_FW_RESET_STATE_RESET_FW: |
13829 | bnxt_reset_all(bp); |
13830 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
13832 | return; |
13833 | case BNXT_FW_RESET_STATE_ENABLE_DEV: |
13834 | bnxt_inv_fw_health_reg(bp); |
13835 | if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && |
13836 | !bp->fw_reset_min_dsecs) { |
13837 | u16 val; |
13838 | |
			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
13840 | if (val == 0xffff) { |
13841 | if (bnxt_fw_reset_timeout(bp)) { |
13842 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, PCI config space invalid\n" ); |
13843 | rc = -ETIMEDOUT; |
13844 | goto fw_reset_abort; |
13845 | } |
13846 | bnxt_queue_fw_reset_work(bp, HZ / 1000); |
13847 | return; |
13848 | } |
13849 | } |
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
13853 | !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) |
13854 | bnxt_dl_remote_reload(bp); |
		if (pci_enable_device(bp->pdev)) {
			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
13857 | rc = -ENODEV; |
13858 | goto fw_reset_abort; |
13859 | } |
		pci_set_master(bp->pdev);
13861 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; |
13862 | fallthrough; |
13863 | case BNXT_FW_RESET_STATE_POLL_FW: |
13864 | bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; |
13865 | rc = bnxt_hwrm_poll(bp); |
13866 | if (rc) { |
13867 | if (bnxt_fw_reset_timeout(bp)) { |
13868 | netdev_err(dev: bp->dev, format: "Firmware reset aborted\n" ); |
13869 | goto fw_reset_abort_status; |
13870 | } |
13871 | bnxt_queue_fw_reset_work(bp, HZ / 5); |
13872 | return; |
13873 | } |
13874 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
13875 | bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; |
13876 | fallthrough; |
13877 | case BNXT_FW_RESET_STATE_OPENING: |
13878 | while (!rtnl_trylock()) { |
13879 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
13880 | return; |
13881 | } |
		rc = bnxt_open(bp->dev);
		if (rc) {
			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
13885 | bnxt_fw_reset_abort(bp, rc); |
13886 | rtnl_unlock(); |
13887 | return; |
13888 | } |
13889 | |
13890 | if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && |
13891 | bp->fw_health->enabled) { |
13892 | bp->fw_health->last_fw_reset_cnt = |
13893 | bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
13894 | } |
13895 | bp->fw_reset_state = 0; |
13896 | /* Make sure fw_reset_state is 0 before clearing the flag */ |
13897 | smp_mb__before_atomic(); |
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
13900 | bnxt_reenable_sriov(bp); |
13901 | bnxt_vf_reps_alloc(bp); |
13902 | bnxt_vf_reps_open(bp); |
13903 | bnxt_ptp_reapply_pps(bp); |
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
			bnxt_dl_health_fw_recovery_done(bp);
			bnxt_dl_health_fw_status_update(bp, true);
13908 | } |
13909 | rtnl_unlock(); |
13910 | break; |
13911 | } |
13912 | return; |
13913 | |
13914 | fw_reset_abort_status: |
13915 | if (bp->fw_health->status_reliable || |
13916 | (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { |
13917 | u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
13918 | |
13919 | netdev_err(dev: bp->dev, format: "fw_health_status 0x%x\n" , sts); |
13920 | } |
13921 | fw_reset_abort: |
13922 | rtnl_lock(); |
13923 | bnxt_fw_reset_abort(bp, rc); |
13924 | rtnl_unlock(); |
13925 | } |
13926 | |
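/* One-time PCI/board setup at probe: enable the device, map BARs 0 and 4,
 * set the DMA mask, and initialize locks, work items, and the timer.
 */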
13927 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) |
13928 | { |
13929 | int rc; |
13930 | struct bnxt *bp = netdev_priv(dev); |
13931 | |
13932 | SET_NETDEV_DEV(dev, &pdev->dev); |
13933 | |
13934 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
13938 | goto init_err; |
13939 | } |
13940 | |
13941 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
13944 | rc = -ENODEV; |
13945 | goto init_err_disable; |
13946 | } |
13947 | |
13948 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); |
13949 | if (rc) { |
13950 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n" ); |
13951 | goto init_err_disable; |
13952 | } |
13953 | |
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
13957 | rc = -EIO; |
13958 | goto init_err_release; |
13959 | } |
13960 | |
	pci_set_master(pdev);
13962 | |
13963 | bp->dev = dev; |
13964 | bp->pdev = pdev; |
13965 | |
13966 | /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() |
13967 | * determines the BAR size. |
13968 | */ |
	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
13972 | rc = -ENOMEM; |
13973 | goto init_err_release; |
13974 | } |
13975 | |
	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
13979 | rc = -ENOMEM; |
13980 | goto init_err_release; |
13981 | } |
13982 | |
13983 | INIT_WORK(&bp->sp_task, bnxt_sp_task); |
13984 | INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); |
13985 | |
13986 | spin_lock_init(&bp->ntp_fltr_lock); |
13987 | #if BITS_PER_LONG == 32 |
13988 | spin_lock_init(&bp->db_lock); |
13989 | #endif |
13990 | |
13991 | bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; |
13992 | bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; |
13993 | |
13994 | timer_setup(&bp->timer, bnxt_timer, 0); |
13995 | bp->current_interval = BNXT_TIMER_INTERVAL; |
13996 | |
13997 | bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; |
13998 | bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; |
13999 | |
	clear_bit(BNXT_STATE_OPEN, &bp->state);
14001 | return 0; |
14002 | |
14003 | init_err_release: |
14004 | bnxt_unmap_bars(bp, pdev); |
14005 | pci_release_regions(pdev); |
14006 | |
14007 | init_err_disable: |
	pci_disable_device(pdev);
14009 | |
14010 | init_err: |
14011 | return rc; |
14012 | } |
14013 | |
14014 | /* rtnl_lock held */ |
14015 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) |
14016 | { |
14017 | struct sockaddr *addr = p; |
14018 | struct bnxt *bp = netdev_priv(dev); |
14019 | int rc = 0; |
14020 | |
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
14025 | return 0; |
14026 | |
14027 | rc = bnxt_approve_mac(bp, addr->sa_data, true); |
14028 | if (rc) |
14029 | return rc; |
14030 | |
	eth_hw_addr_set(dev, addr->sa_data);
	bnxt_clear_usr_fltrs(bp, true);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
14036 | } |
14037 | |
14038 | return rc; |
14039 | } |
14040 | |
14041 | /* rtnl_lock held */ |
14042 | static int bnxt_change_mtu(struct net_device *dev, int new_mtu) |
14043 | { |
14044 | struct bnxt *bp = netdev_priv(dev); |
14045 | |
14046 | if (netif_running(dev)) |
		bnxt_close_nic(bp, true, false);
14048 | |
14049 | dev->mtu = new_mtu; |
14050 | bnxt_set_ring_params(bp); |
14051 | |
14052 | if (netif_running(dev)) |
		return bnxt_open_nic(bp, true, false);
14054 | |
14055 | return 0; |
14056 | } |
14057 | |
14058 | int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) |
14059 | { |
14060 | struct bnxt *bp = netdev_priv(dev); |
14061 | bool sh = false; |
14062 | int rc, tx_cp; |
14063 | |
14064 | if (tc > bp->max_tc) { |
14065 | netdev_err(dev, format: "Too many traffic classes requested: %d. Max supported is %d.\n" , |
14066 | tc, bp->max_tc); |
14067 | return -EINVAL; |
14068 | } |
14069 | |
14070 | if (bp->num_tc == tc) |
14071 | return 0; |
14072 | |
14073 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
14074 | sh = true; |
14075 | |
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      sh, tc, bp->tx_nr_rings_xdp);
14078 | if (rc) |
14079 | return rc; |
14080 | |
14081 | /* Needs to close the device and do hw resource re-allocations */ |
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);
14084 | |
14085 | if (tc) { |
14086 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; |
		netdev_set_num_tc(dev, tc);
14088 | bp->num_tc = tc; |
14089 | } else { |
14090 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
14091 | netdev_reset_tc(dev); |
14092 | bp->num_tc = 0; |
14093 | } |
14094 | bp->tx_nr_rings += bp->tx_nr_rings_xdp; |
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
14096 | bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : |
14097 | tx_cp + bp->rx_nr_rings; |
14098 | |
	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);
14101 | |
14102 | return 0; |
14103 | } |
14104 | |
14105 | static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
14106 | void *cb_priv) |
14107 | { |
14108 | struct bnxt *bp = cb_priv; |
14109 | |
14110 | if (!bnxt_tc_flower_enabled(bp) || |
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
14112 | return -EOPNOTSUPP; |
14113 | |
14114 | switch (type) { |
14115 | case TC_SETUP_CLSFLOWER: |
		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
14117 | default: |
14118 | return -EOPNOTSUPP; |
14119 | } |
14120 | } |
14121 | |
14122 | LIST_HEAD(bnxt_block_cb_list); |
14123 | |
14124 | static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, |
14125 | void *type_data) |
14126 | { |
14127 | struct bnxt *bp = netdev_priv(dev); |
14128 | |
14129 | switch (type) { |
14130 | case TC_SETUP_BLOCK: |
		return flow_block_cb_setup_simple(type_data,
						  &bnxt_block_cb_list,
						  bnxt_setup_tc_block_cb,
						  bp, bp, true);
14135 | case TC_SETUP_QDISC_MQPRIO: { |
14136 | struct tc_mqprio_qopt *mqprio = type_data; |
14137 | |
14138 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
14139 | |
		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
14141 | } |
14142 | default: |
14143 | return -EOPNOTSUPP; |
14144 | } |
14145 | } |
14146 | |
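/* Hash an ntuple flow into the filter table, using the skb's RX hash when
 * available, else a Toeplitz hash computed with the default VNIC's RSS key.
 */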
14147 | u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, |
14148 | const struct sk_buff *skb) |
14149 | { |
14150 | struct bnxt_vnic_info *vnic; |
14151 | |
14152 | if (skb) |
14153 | return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; |
14154 | |
14155 | vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; |
	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
14157 | } |
14158 | |
14159 | int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, |
14160 | u32 idx) |
14161 | { |
14162 | struct hlist_head *head; |
14163 | int bit_id; |
14164 | |
	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return -ENOMEM;
	}

	fltr->base.sw_id = (u16)bit_id;
	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
	fltr->base.flags |= BNXT_ACT_RING_DST;
	head = &bp->ntp_fltr_hash_tbl[idx];
	hlist_add_head_rcu(&fltr->base.hash, head);
	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
	bnxt_insert_usr_fltr(bp, &fltr->base);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);
14181 | return 0; |
14182 | } |
14183 | |
14184 | static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, |
14185 | struct bnxt_ntuple_filter *f2) |
14186 | { |
14187 | struct bnxt_flow_masks *masks1 = &f1->fmasks; |
14188 | struct bnxt_flow_masks *masks2 = &f2->fmasks; |
14189 | struct flow_keys *keys1 = &f1->fkeys; |
14190 | struct flow_keys *keys2 = &f2->fkeys; |
14191 | |
14192 | if (keys1->basic.n_proto != keys2->basic.n_proto || |
14193 | keys1->basic.ip_proto != keys2->basic.ip_proto) |
14194 | return false; |
14195 | |
14196 | if (keys1->basic.n_proto == htons(ETH_P_IP)) { |
14197 | if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || |
14198 | masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || |
14199 | keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || |
14200 | masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) |
14201 | return false; |
14202 | } else { |
		if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
				     &keys2->addrs.v6addrs.src) ||
		    !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
				     &masks2->addrs.v6addrs.src) ||
		    !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
				     &keys2->addrs.v6addrs.dst) ||
		    !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
				     &masks2->addrs.v6addrs.dst))
14211 | return false; |
14212 | } |
14213 | |
14214 | return keys1->ports.src == keys2->ports.src && |
14215 | masks1->ports.src == masks2->ports.src && |
14216 | keys1->ports.dst == keys2->ports.dst && |
14217 | masks1->ports.dst == masks2->ports.dst && |
14218 | keys1->control.flags == keys2->control.flags && |
14219 | f1->l2_fltr == f2->l2_fltr; |
14220 | } |
14221 | |
14222 | struct bnxt_ntuple_filter * |
14223 | bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, |
14224 | struct bnxt_ntuple_filter *fltr, u32 idx) |
14225 | { |
14226 | struct bnxt_ntuple_filter *f; |
14227 | struct hlist_head *head; |
14228 | |
14229 | head = &bp->ntp_fltr_hash_tbl[idx]; |
14230 | hlist_for_each_entry_rcu(f, head, base.hash) { |
		if (bnxt_fltr_match(f, fltr))
14232 | return f; |
14233 | } |
14234 | return NULL; |
14235 | } |
14236 | |
14237 | #ifdef CONFIG_RFS_ACCEL |
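/* aRFS callback (.ndo_rx_flow_steer): look up or create an ntuple filter
 * steering this flow to the requested RX queue.  Returns the filter's
 * sw_id on success.
 */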
14238 | static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, |
14239 | u16 rxq_index, u32 flow_id) |
14240 | { |
14241 | struct bnxt *bp = netdev_priv(dev); |
14242 | struct bnxt_ntuple_filter *fltr, *new_fltr; |
14243 | struct flow_keys *fkeys; |
14244 | struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); |
14245 | struct bnxt_l2_filter *l2_fltr; |
14246 | int rc = 0, idx; |
14247 | u32 flags; |
14248 | |
	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
		atomic_inc(&l2_fltr->refcnt);
	} else {
		struct bnxt_l2_key key;

		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
		key.vlan = 0;
		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
		if (!l2_fltr)
			return -EINVAL;
		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
			bnxt_del_l2_filter(bp, l2_fltr);
			return -EINVAL;
		}
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr) {
		bnxt_del_l2_filter(bp, l2_fltr);
14268 | return -ENOMEM; |
14269 | } |
14270 | |
14271 | fkeys = &new_fltr->fkeys; |
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
14273 | rc = -EPROTONOSUPPORT; |
14274 | goto err_free; |
14275 | } |
14276 | |
14277 | if ((fkeys->basic.n_proto != htons(ETH_P_IP) && |
14278 | fkeys->basic.n_proto != htons(ETH_P_IPV6)) || |
14279 | ((fkeys->basic.ip_proto != IPPROTO_TCP) && |
14280 | (fkeys->basic.ip_proto != IPPROTO_UDP))) { |
14281 | rc = -EPROTONOSUPPORT; |
14282 | goto err_free; |
14283 | } |
14284 | new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; |
14285 | if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { |
14286 | if (bp->hwrm_spec_code < 0x10601) { |
14287 | rc = -EPROTONOSUPPORT; |
14288 | goto err_free; |
14289 | } |
14290 | new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; |
14291 | } |
14292 | flags = fkeys->control.flags; |
14293 | if (((flags & FLOW_DIS_ENCAPSULATION) && |
14294 | bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { |
14295 | rc = -EPROTONOSUPPORT; |
14296 | goto err_free; |
14297 | } |
14298 | new_fltr->l2_fltr = l2_fltr; |
14299 | |
14300 | idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); |
14301 | rcu_read_lock(); |
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
14303 | if (fltr) { |
14304 | rc = fltr->base.sw_id; |
14305 | rcu_read_unlock(); |
14306 | goto err_free; |
14307 | } |
14308 | rcu_read_unlock(); |
14309 | |
14310 | new_fltr->flow_id = flow_id; |
14311 | new_fltr->base.rxq = rxq_index; |
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
14313 | if (!rc) { |
14314 | bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); |
14315 | return new_fltr->base.sw_id; |
14316 | } |
14317 | |
14318 | err_free: |
	bnxt_del_l2_filter(bp, l2_fltr);
	kfree(new_fltr);
14321 | return rc; |
14322 | } |
14323 | #endif |
14324 | |
14325 | void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) |
14326 | { |
	spin_lock_bh(&bp->ntp_fltr_lock);
	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		return;
	}
	hlist_del_rcu(&fltr->base.hash);
	bnxt_del_one_usr_fltr(bp, &fltr->base);
	bp->ntp_fltr_count--;
	spin_unlock_bh(&bp->ntp_fltr_lock);
	bnxt_del_l2_filter(bp, fltr->l2_fltr);
	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
14338 | kfree_rcu(fltr, base.rcu); |
14339 | } |
14340 | |
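/* Sync the ntuple filter table with hardware: allocate HW filters for
 * newly inserted entries and free those whose flows have expired per RPS.
 */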
14341 | static void bnxt_cfg_ntp_filters(struct bnxt *bp) |
14342 | { |
14343 | #ifdef CONFIG_RFS_ACCEL |
14344 | int i; |
14345 | |
14346 | for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { |
14347 | struct hlist_head *head; |
14348 | struct hlist_node *tmp; |
14349 | struct bnxt_ntuple_filter *fltr; |
14350 | int rc; |
14351 | |
14352 | head = &bp->ntp_fltr_hash_tbl[i]; |
14353 | hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { |
14354 | bool del = false; |
14355 | |
14356 | if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { |
14357 | if (fltr->base.flags & BNXT_ACT_NO_AGING) |
14358 | continue; |
				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
							fltr->flow_id,
							fltr->base.sw_id)) {
14362 | bnxt_hwrm_cfa_ntuple_filter_free(bp, |
14363 | fltr); |
14364 | del = true; |
14365 | } |
14366 | } else { |
14367 | rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, |
14368 | fltr); |
14369 | if (rc) |
14370 | del = true; |
14371 | else |
					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
14373 | } |
14374 | |
14375 | if (del) |
14376 | bnxt_del_ntp_filter(bp, fltr); |
14377 | } |
14378 | } |
14379 | #endif |
14380 | } |
14381 | |
14382 | static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, |
14383 | unsigned int entry, struct udp_tunnel_info *ti) |
14384 | { |
	struct bnxt *bp = netdev_priv(netdev);
14386 | unsigned int cmd; |
14387 | |
14388 | if (ti->type == UDP_TUNNEL_TYPE_VXLAN) |
14389 | cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; |
14390 | else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) |
14391 | cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; |
14392 | else |
14393 | cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; |
14394 | |
	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
14396 | } |
14397 | |
14398 | static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, |
14399 | unsigned int entry, struct udp_tunnel_info *ti) |
14400 | { |
	struct bnxt *bp = netdev_priv(netdev);
14402 | unsigned int cmd; |
14403 | |
14404 | if (ti->type == UDP_TUNNEL_TYPE_VXLAN) |
14405 | cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; |
14406 | else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) |
14407 | cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; |
14408 | else |
14409 | cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; |
14410 | |
	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
14412 | } |
14413 | |
14414 | static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { |
14415 | .set_port = bnxt_udp_tunnel_set_port, |
14416 | .unset_port = bnxt_udp_tunnel_unset_port, |
14417 | .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | |
14418 | UDP_TUNNEL_NIC_INFO_OPEN_ONLY, |
14419 | .tables = { |
14420 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
14421 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, |
14422 | }, |
14423 | }, bnxt_udp_tunnels_p7 = { |
14424 | .set_port = bnxt_udp_tunnel_set_port, |
14425 | .unset_port = bnxt_udp_tunnel_unset_port, |
14426 | .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | |
14427 | UDP_TUNNEL_NIC_INFO_OPEN_ONLY, |
14428 | .tables = { |
14429 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
14430 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, |
14431 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, |
14432 | }, |
14433 | }; |
14434 | |
14435 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
14436 | struct net_device *dev, u32 filter_mask, |
14437 | int nlflags) |
14438 | { |
14439 | struct bnxt *bp = netdev_priv(dev); |
14440 | |
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
14443 | } |
14444 | |
14445 | static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
14446 | u16 flags, struct netlink_ext_ack *extack) |
14447 | { |
14448 | struct bnxt *bp = netdev_priv(dev); |
14449 | struct nlattr *attr, *br_spec; |
14450 | int rem, rc = 0; |
14451 | |
14452 | if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) |
14453 | return -EOPNOTSUPP; |
14454 | |
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
14456 | if (!br_spec) |
14457 | return -EINVAL; |
14458 | |
14459 | nla_for_each_nested(attr, br_spec, rem) { |
14460 | u16 mode; |
14461 | |
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
14463 | continue; |
14464 | |
		mode = nla_get_u16(attr);
14466 | if (mode == bp->br_mode) |
14467 | break; |
14468 | |
		rc = bnxt_hwrm_set_br_mode(bp, mode);
14470 | if (!rc) |
14471 | bp->br_mode = mode; |
14472 | break; |
14473 | } |
14474 | return rc; |
14475 | } |
14476 | |
14477 | int bnxt_get_port_parent_id(struct net_device *dev, |
14478 | struct netdev_phys_item_id *ppid) |
14479 | { |
14480 | struct bnxt *bp = netdev_priv(dev); |
14481 | |
14482 | if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) |
14483 | return -EOPNOTSUPP; |
14484 | |
	/* The PF and its VF-reps only support the switchdev framework */
14486 | if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) |
14487 | return -EOPNOTSUPP; |
14488 | |
14489 | ppid->id_len = sizeof(bp->dsn); |
14490 | memcpy(ppid->id, bp->dsn, ppid->id_len); |
14491 | |
14492 | return 0; |
14493 | } |
14494 | |
14495 | static const struct net_device_ops bnxt_netdev_ops = { |
14496 | .ndo_open = bnxt_open, |
14497 | .ndo_start_xmit = bnxt_start_xmit, |
14498 | .ndo_stop = bnxt_close, |
14499 | .ndo_get_stats64 = bnxt_get_stats64, |
14500 | .ndo_set_rx_mode = bnxt_set_rx_mode, |
14501 | .ndo_eth_ioctl = bnxt_ioctl, |
14502 | .ndo_validate_addr = eth_validate_addr, |
14503 | .ndo_set_mac_address = bnxt_change_mac_addr, |
14504 | .ndo_change_mtu = bnxt_change_mtu, |
14505 | .ndo_fix_features = bnxt_fix_features, |
14506 | .ndo_set_features = bnxt_set_features, |
14507 | .ndo_features_check = bnxt_features_check, |
14508 | .ndo_tx_timeout = bnxt_tx_timeout, |
14509 | #ifdef CONFIG_BNXT_SRIOV |
14510 | .ndo_get_vf_config = bnxt_get_vf_config, |
14511 | .ndo_set_vf_mac = bnxt_set_vf_mac, |
14512 | .ndo_set_vf_vlan = bnxt_set_vf_vlan, |
14513 | .ndo_set_vf_rate = bnxt_set_vf_bw, |
14514 | .ndo_set_vf_link_state = bnxt_set_vf_link_state, |
14515 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, |
14516 | .ndo_set_vf_trust = bnxt_set_vf_trust, |
14517 | #endif |
14518 | .ndo_setup_tc = bnxt_setup_tc, |
14519 | #ifdef CONFIG_RFS_ACCEL |
14520 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, |
14521 | #endif |
14522 | .ndo_bpf = bnxt_xdp, |
14523 | .ndo_xdp_xmit = bnxt_xdp_xmit, |
14524 | .ndo_bridge_getlink = bnxt_bridge_getlink, |
14525 | .ndo_bridge_setlink = bnxt_bridge_setlink, |
14526 | }; |
14527 | |
14528 | static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, |
14529 | struct netdev_queue_stats_rx *stats) |
14530 | { |
14531 | struct bnxt *bp = netdev_priv(dev); |
14532 | struct bnxt_cp_ring_info *cpr; |
14533 | u64 *sw; |
14534 | |
14535 | cpr = &bp->bnapi[i]->cp_ring; |
14536 | sw = cpr->stats.sw_stats; |
14537 | |
14538 | stats->packets = 0; |
14539 | stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); |
14540 | stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
14541 | stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); |
14542 | |
14543 | stats->bytes = 0; |
14544 | stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); |
14545 | stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); |
14546 | stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); |
14547 | |
14548 | stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards; |
14549 | } |
14550 | |
14551 | static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, |
14552 | struct netdev_queue_stats_tx *stats) |
14553 | { |
14554 | struct bnxt *bp = netdev_priv(dev); |
14555 | struct bnxt_napi *bnapi; |
14556 | u64 *sw; |
14557 | |
14558 | bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; |
14559 | sw = bnapi->cp_ring.stats.sw_stats; |
14560 | |
14561 | stats->packets = 0; |
14562 | stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); |
14563 | stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); |
14564 | stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); |
14565 | |
14566 | stats->bytes = 0; |
14567 | stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); |
14568 | stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); |
14569 | stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); |
14570 | } |
14571 | |
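/* Base counters saved from rings that have since been freed (e.g.
 * across an ifdown or reset); the core adds the live per-queue
 * counters on top of these totals.
 */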
14572 | static void bnxt_get_base_stats(struct net_device *dev, |
14573 | struct netdev_queue_stats_rx *rx, |
14574 | struct netdev_queue_stats_tx *tx) |
14575 | { |
14576 | struct bnxt *bp = netdev_priv(dev); |
14577 | |
14578 | rx->packets = bp->net_stats_prev.rx_packets; |
14579 | rx->bytes = bp->net_stats_prev.rx_bytes; |
14580 | rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; |
14581 | |
14582 | tx->packets = bp->net_stats_prev.tx_packets; |
14583 | tx->bytes = bp->net_stats_prev.tx_bytes; |
14584 | } |
14585 | |
14586 | static const struct netdev_stat_ops bnxt_stat_ops = { |
14587 | .get_queue_stats_rx = bnxt_get_queue_stats_rx, |
14588 | .get_queue_stats_tx = bnxt_get_queue_stats_tx, |
14589 | .get_base_stats = bnxt_get_base_stats, |
14590 | }; |
14591 | |
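/* PCI removal callback: tear down in roughly the reverse order of
 * bnxt_init_one() - SR-IOV, the aux RDMA device, the netdev, filters,
 * pending work, devlink, TC, interrupts, HWRM resources and finally
 * the netdev itself.
 */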
14592 | static void bnxt_remove_one(struct pci_dev *pdev) |
14593 | { |
14594 | struct net_device *dev = pci_get_drvdata(pdev); |
14595 | struct bnxt *bp = netdev_priv(dev); |
14596 | |
14597 | if (BNXT_PF(bp)) |
14598 | bnxt_sriov_disable(bp); |
14599 | |
14600 | bnxt_rdma_aux_device_uninit(bp); |
14601 | |
14602 | bnxt_ptp_clear(bp); |
14603 | unregister_netdev(dev); |
	bnxt_free_l2_filters(bp, true);
	bnxt_free_ntp_fltrs(bp, true);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
14610 | bp->sp_event = 0; |
14611 | |
14612 | bnxt_dl_fw_reporters_destroy(bp); |
14613 | bnxt_dl_unregister(bp); |
14614 | bnxt_shutdown_tc(bp); |
14615 | |
14616 | bnxt_clear_int_mode(bp); |
14617 | bnxt_hwrm_func_drv_unrgtr(bp); |
14618 | bnxt_free_hwrm_resources(bp); |
14619 | bnxt_hwmon_uninit(bp); |
14620 | bnxt_ethtool_free(bp); |
14621 | bnxt_dcb_free(bp); |
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->rss_indir_tbl);
14629 | bp->rss_indir_tbl = NULL; |
14630 | bnxt_free_port_stats(bp); |
14631 | free_netdev(dev); |
14632 | } |
14633 | |
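/* Query PHY capabilities from the firmware and, if fw_dflt is set,
 * read the current link state and initialize the ethtool link
 * settings from the firmware defaults.
 */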
14634 | static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) |
14635 | { |
14636 | int rc = 0; |
14637 | struct bnxt_link_info *link_info = &bp->link_info; |
14638 | |
14639 | bp->phy_flags = 0; |
14640 | rc = bnxt_hwrm_phy_qcaps(bp); |
14641 | if (rc) { |
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
14644 | return rc; |
14645 | } |
14646 | if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) |
14647 | bp->dev->priv_flags |= IFF_SUPP_NOFCS; |
14648 | else |
14649 | bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; |
14650 | if (!fw_dflt) |
14651 | return 0; |
14652 | |
14653 | mutex_lock(&bp->link_lock); |
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
14659 | return rc; |
14660 | } |
14661 | |
14662 | /* Older firmware does not have supported_auto_speeds, so assume |
14663 | * that all supported speeds can be autonegotiated. |
14664 | */ |
14665 | if (link_info->auto_link_speeds && !link_info->support_auto_speeds) |
14666 | link_info->support_auto_speeds = link_info->support_speeds; |
14667 | |
14668 | bnxt_init_ethtool_link_settings(bp); |
	mutex_unlock(&bp->link_lock);
14670 | return 0; |
14671 | } |
14672 | |
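/* Return the MSI-X table size from PCI config space.  The QSIZE field
 * encodes the table size minus one, hence the + 1; devices without an
 * MSI-X capability get a single vector.
 */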
14673 | static int bnxt_get_max_irq(struct pci_dev *pdev) |
14674 | { |
14675 | u16 ctrl; |
14676 | |
14677 | if (!pdev->msix_cap) |
14678 | return 1; |
14679 | |
	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
14681 | return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; |
14682 | } |
14683 | |
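/* Compute the maximum usable RX/TX/completion rings from the hardware
 * resources reported by firmware, accounting for ULP (RoCE) MSI-X and
 * stat context usage, aggregation rings and chip-specific quirks.
 */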
14684 | static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, |
14685 | int *max_cp) |
14686 | { |
14687 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
14688 | int max_ring_grps = 0, max_irq; |
14689 | |
14690 | *max_tx = hw_resc->max_tx_rings; |
14691 | *max_rx = hw_resc->max_rx_rings; |
14692 | *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); |
14693 | max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - |
14694 | bnxt_get_ulp_msix_num(bp), |
14695 | hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); |
14696 | if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) |
14697 | *max_cp = min_t(int, *max_cp, max_irq); |
14698 | max_ring_grps = hw_resc->max_hw_ring_grps; |
14699 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
14700 | *max_cp -= 1; |
14701 | *max_rx -= 2; |
14702 | } |
14703 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
14704 | *max_rx >>= 1; |
14705 | if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { |
14706 | int rc; |
14707 | |
		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
14709 | if (rc) { |
14710 | *max_rx = 0; |
14711 | *max_tx = 0; |
14712 | } |
14713 | /* On P5 chips, max_cp output param should be available NQs */ |
14714 | *max_cp = max_irq; |
14715 | } |
14716 | *max_rx = min_t(int, *max_rx, max_ring_grps); |
14717 | } |
14718 | |
14719 | int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) |
14720 | { |
14721 | int rx, tx, cp; |
14722 | |
	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
14724 | *max_rx = rx; |
14725 | *max_tx = tx; |
14726 | if (!rx || !tx || !cp) |
14727 | return -ENOMEM; |
14728 | |
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
14730 | } |
14731 | |
14732 | static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, |
14733 | bool shared) |
14734 | { |
14735 | int rc; |
14736 | |
14737 | rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); |
14738 | if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { |
14739 | /* Not enough rings, try disabling agg rings. */ |
14740 | bp->flags &= ~BNXT_FLAG_AGG_RINGS; |
14741 | rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); |
14742 | if (rc) { |
14743 | /* set BNXT_FLAG_AGG_RINGS back for consistency */ |
14744 | bp->flags |= BNXT_FLAG_AGG_RINGS; |
14745 | return rc; |
14746 | } |
14747 | bp->flags |= BNXT_FLAG_NO_AGG_RINGS; |
14748 | bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
14749 | bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
14750 | bnxt_set_ring_params(bp); |
14751 | } |
14752 | |
14753 | if (bp->flags & BNXT_FLAG_ROCE_CAP) { |
14754 | int max_cp, max_stat, max_irq; |
14755 | |
14756 | /* Reserve minimum resources for RoCE */ |
14757 | max_cp = bnxt_get_max_func_cp_rings(bp); |
14758 | max_stat = bnxt_get_max_func_stat_ctxs(bp); |
14759 | max_irq = bnxt_get_max_func_irqs(bp); |
14760 | if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || |
14761 | max_irq <= BNXT_MIN_ROCE_CP_RINGS || |
14762 | max_stat <= BNXT_MIN_ROCE_STAT_CTXS) |
14763 | return 0; |
14764 | |
14765 | max_cp -= BNXT_MIN_ROCE_CP_RINGS; |
14766 | max_irq -= BNXT_MIN_ROCE_CP_RINGS; |
14767 | max_stat -= BNXT_MIN_ROCE_STAT_CTXS; |
14768 | max_cp = min_t(int, max_cp, max_irq); |
14769 | max_cp = min_t(int, max_cp, max_stat); |
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
14771 | if (rc) |
14772 | rc = 0; |
14773 | } |
14774 | return rc; |
14775 | } |
14776 | |
/* In the initial default shared ring setting, each shared ring must have
 * an RX/TX ring pair.
 */
14780 | static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) |
14781 | { |
14782 | bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); |
14783 | bp->rx_nr_rings = bp->cp_nr_rings; |
14784 | bp->tx_nr_rings_per_tc = bp->cp_nr_rings; |
14785 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
14786 | } |
14787 | |
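/* Choose the default ring counts, capped by the RSS default, the CPU
 * count on multi-port cards, and the resources the firmware will
 * actually reserve.
 */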
14788 | static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) |
14789 | { |
14790 | int dflt_rings, max_rx_rings, max_tx_rings, rc; |
14791 | |
14792 | if (!bnxt_can_reserve_rings(bp)) |
14793 | return 0; |
14794 | |
14795 | if (sh) |
14796 | bp->flags |= BNXT_FLAG_SHARED_RINGS; |
14797 | dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); |
14798 | /* Reduce default rings on multi-port cards so that total default |
14799 | * rings do not exceed CPU count. |
14800 | */ |
14801 | if (bp->port_count > 1) { |
14802 | int max_rings = |
14803 | max_t(int, num_online_cpus() / bp->port_count, 1); |
14804 | |
14805 | dflt_rings = min_t(int, dflt_rings, max_rings); |
14806 | } |
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
14808 | if (rc) |
14809 | return rc; |
14810 | bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); |
14811 | bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); |
14812 | if (sh) |
14813 | bnxt_trim_dflt_sh_rings(bp); |
14814 | else |
14815 | bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; |
14816 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
14817 | |
14818 | rc = __bnxt_reserve_rings(bp); |
14819 | if (rc && rc != -ENODEV) |
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
14821 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
14822 | if (sh) |
14823 | bnxt_trim_dflt_sh_rings(bp); |
14824 | |
14825 | /* Rings may have been trimmed, re-reserve the trimmed rings. */ |
14826 | if (bnxt_need_reserve_rings(bp)) { |
14827 | rc = __bnxt_reserve_rings(bp); |
14828 | if (rc && rc != -ENODEV) |
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
14830 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
14831 | } |
14832 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
14833 | bp->rx_nr_rings++; |
14834 | bp->cp_nr_rings++; |
14835 | } |
14836 | if (rc) { |
14837 | bp->tx_nr_rings = 0; |
14838 | bp->rx_nr_rings = 0; |
14839 | } |
14840 | return rc; |
14841 | } |
14842 | |
14843 | static int bnxt_init_dflt_ring_mode(struct bnxt *bp) |
14844 | { |
14845 | int rc; |
14846 | |
14847 | if (bp->tx_nr_rings) |
14848 | return 0; |
14849 | |
14850 | bnxt_ulp_irq_stop(bp); |
14851 | bnxt_clear_int_mode(bp); |
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "Not enough rings available.\n");
14858 | goto init_dflt_ring_err; |
14859 | } |
14860 | rc = bnxt_init_int_mode(bp); |
14861 | if (rc) |
14862 | goto init_dflt_ring_err; |
14863 | |
14864 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
14865 | |
14866 | bnxt_set_dflt_rfs(bp); |
14867 | |
14868 | init_dflt_ring_err: |
	bnxt_ulp_irq_restart(bp, rc);
14870 | return rc; |
14871 | } |
14872 | |
14873 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) |
14874 | { |
14875 | int rc; |
14876 | |
14877 | ASSERT_RTNL(); |
14878 | bnxt_hwrm_func_qcaps(bp); |
14879 | |
	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}
14893 | } |
14894 | |
14895 | return rc; |
14896 | } |
14897 | |
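/* For the PF, use the firmware-provided MAC address.  For a VF, use
 * the admin-assigned MAC if valid (and ask the PF to approve it),
 * otherwise fall back to a random address.
 */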
14898 | static int bnxt_init_mac_addr(struct bnxt *bp) |
14899 | { |
14900 | int rc = 0; |
14901 | |
14902 | if (BNXT_PF(bp)) { |
		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
14904 | } else { |
14905 | #ifdef CONFIG_BNXT_SRIOV |
14906 | struct bnxt_vf_info *vf = &bp->vf; |
14907 | bool strict_approval = true; |
14908 | |
		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(bp->dev, vf->mac_addr);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
14918 | } |
14919 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); |
14920 | #endif |
14921 | } |
14922 | return rc; |
14923 | } |
14924 | |
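/* Cache the board part number and serial number from the read-only
 * PCI VPD fields, if present.
 */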
14925 | static void bnxt_vpd_read_info(struct bnxt *bp) |
14926 | { |
14927 | struct pci_dev *pdev = bp->pdev; |
14928 | unsigned int vpd_size, kw_len; |
14929 | int pos, size; |
14930 | u8 *vpd_data; |
14931 | |
	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
14935 | return; |
14936 | } |
14937 | |
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
14940 | if (pos < 0) |
14941 | goto read_sn; |
14942 | |
14943 | size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); |
14944 | memcpy(bp->board_partno, &vpd_data[pos], size); |
14945 | |
14946 | read_sn: |
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
14950 | if (pos < 0) |
14951 | goto exit; |
14952 | |
14953 | size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); |
14954 | memcpy(bp->board_serialno, &vpd_data[pos], size); |
14955 | exit: |
	kfree(vpd_data);
14957 | } |
14958 | |
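/* Read the PCIe Device Serial Number into @dsn in little-endian byte
 * order; it is later used as the eswitch switch_id.
 */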
14959 | static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) |
14960 | { |
14961 | struct pci_dev *pdev = bp->pdev; |
14962 | u64 qword; |
14963 | |
	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
14967 | return -EOPNOTSUPP; |
14968 | } |
14969 | |
	put_unaligned_le64(qword, dsn);
14971 | |
14972 | bp->flags |= BNXT_FLAG_DSN_VALID; |
14973 | return 0; |
14974 | } |
14975 | |
14976 | static int bnxt_map_db_bar(struct bnxt *bp) |
14977 | { |
14978 | if (!bp->db_size) |
14979 | return -ENODEV; |
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
14981 | if (!bp->bar1) |
14982 | return -ENOMEM; |
14983 | return 0; |
14984 | } |
14985 | |
14986 | void bnxt_print_device_info(struct bnxt *bp) |
14987 | { |
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	pcie_print_link_status(bp->pdev);
14993 | } |
14994 | |
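/* PCI probe callback: allocate the netdev, map the BARs, bring up
 * firmware communication, discover device capabilities and the default
 * ring/feature configuration, then register the netdev and devlink
 * interfaces.
 */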
14995 | static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
14996 | { |
14997 | struct bnxt_hw_resc *hw_resc; |
14998 | struct net_device *dev; |
14999 | struct bnxt *bp; |
15000 | int rc, max_irqs; |
15001 | |
	if (pci_is_bridge(pdev))
15003 | return -ENODEV; |
15004 | |
	/* Clear any pending DMA transactions from the crash kernel
	 * while loading the driver in the capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}
15012 | |
15013 | max_irqs = bnxt_get_max_irq(pdev); |
	dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
				 max_irqs);
15016 | if (!dev) |
15017 | return -ENOMEM; |
15018 | |
15019 | bp = netdev_priv(dev); |
15020 | bp->board_idx = ent->driver_data; |
15021 | bp->msg_enable = BNXT_DEF_MSG_ENABLE; |
15022 | bnxt_set_max_func_irqs(bp, max_irqs); |
15023 | |
	if (bnxt_vf_pciid(bp->board_idx))
15025 | bp->flags |= BNXT_FLAG_VF; |
15026 | |
15027 | /* No devlink port registration in case of a VF */ |
15028 | if (BNXT_PF(bp)) |
15029 | SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); |
15030 | |
15031 | if (pdev->msix_cap) |
15032 | bp->flags |= BNXT_FLAG_MSIX_CAP; |
15033 | |
15034 | rc = bnxt_init_board(pdev, dev); |
15035 | if (rc < 0) |
15036 | goto init_err_free; |
15037 | |
15038 | dev->netdev_ops = &bnxt_netdev_ops; |
15039 | dev->stat_ops = &bnxt_stat_ops; |
15040 | dev->watchdog_timeo = BNXT_TX_TIMEOUT; |
15041 | dev->ethtool_ops = &bnxt_ethtool_ops; |
	pci_set_drvdata(pdev, dev);
15043 | |
15044 | rc = bnxt_alloc_hwrm_resources(bp); |
15045 | if (rc) |
15046 | goto init_err_pci_clean; |
15047 | |
15048 | mutex_init(&bp->hwrm_cmd_lock); |
15049 | mutex_init(&bp->link_lock); |
15050 | |
15051 | rc = bnxt_fw_init_one_p1(bp); |
15052 | if (rc) |
15053 | goto init_err_pci_clean; |
15054 | |
15055 | if (BNXT_PF(bp)) |
15056 | bnxt_vpd_read_info(bp); |
15057 | |
15058 | if (BNXT_CHIP_P5_PLUS(bp)) { |
15059 | bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; |
15060 | if (BNXT_CHIP_P7(bp)) |
15061 | bp->flags |= BNXT_FLAG_CHIP_P7; |
15062 | } |
15063 | |
15064 | rc = bnxt_alloc_rss_indir_tbl(bp); |
15065 | if (rc) |
15066 | goto init_err_pci_clean; |
15067 | |
15068 | rc = bnxt_fw_init_one_p2(bp); |
15069 | if (rc) |
15070 | goto init_err_pci_clean; |
15071 | |
15072 | rc = bnxt_map_db_bar(bp); |
15073 | if (rc) { |
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
15075 | rc); |
15076 | goto init_err_pci_clean; |
15077 | } |
15078 | |
15079 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | |
15080 | NETIF_F_TSO | NETIF_F_TSO6 | |
15081 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | |
15082 | NETIF_F_GSO_IPXIP4 | |
15083 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | |
15084 | NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | |
15085 | NETIF_F_RXCSUM | NETIF_F_GRO; |
15086 | if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) |
15087 | dev->hw_features |= NETIF_F_GSO_UDP_L4; |
15088 | |
15089 | if (BNXT_SUPPORTS_TPA(bp)) |
15090 | dev->hw_features |= NETIF_F_LRO; |
15091 | |
15092 | dev->hw_enc_features = |
15093 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | |
15094 | NETIF_F_TSO | NETIF_F_TSO6 | |
15095 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | |
15096 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | |
15097 | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; |
15098 | if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) |
15099 | dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; |
15100 | if (bp->flags & BNXT_FLAG_CHIP_P7) |
15101 | dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; |
15102 | else |
15103 | dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; |
15104 | |
15105 | dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | |
15106 | NETIF_F_GSO_GRE_CSUM; |
15107 | dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; |
15108 | if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) |
15109 | dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; |
15110 | if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) |
15111 | dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; |
15112 | if (BNXT_SUPPORTS_TPA(bp)) |
15113 | dev->hw_features |= NETIF_F_GRO_HW; |
15114 | dev->features |= dev->hw_features | NETIF_F_HIGHDMA; |
15115 | if (dev->features & NETIF_F_GRO_HW) |
15116 | dev->features &= ~NETIF_F_LRO; |
15117 | dev->priv_flags |= IFF_UNICAST_FLT; |
15118 | |
15119 | netif_set_tso_max_size(dev, GSO_MAX_SIZE); |
15120 | |
15121 | dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
15122 | NETDEV_XDP_ACT_RX_SG; |
15123 | |
15124 | #ifdef CONFIG_BNXT_SRIOV |
15125 | init_waitqueue_head(&bp->sriov_cfg_wait); |
15126 | #endif |
15127 | if (BNXT_SUPPORTS_TPA(bp)) { |
15128 | bp->gro_func = bnxt_gro_func_5730x; |
15129 | if (BNXT_CHIP_P4(bp)) |
15130 | bp->gro_func = bnxt_gro_func_5731x; |
15131 | else if (BNXT_CHIP_P5_PLUS(bp)) |
15132 | bp->gro_func = bnxt_gro_func_5750x; |
15133 | } |
15134 | if (!BNXT_CHIP_P4_PLUS(bp)) |
15135 | bp->flags |= BNXT_FLAG_DOUBLE_DB; |
15136 | |
15137 | rc = bnxt_init_mac_addr(bp); |
15138 | if (rc) { |
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
15140 | rc = -EADDRNOTAVAIL; |
15141 | goto init_err_pci_clean; |
15142 | } |
15143 | |
15144 | if (BNXT_PF(bp)) { |
15145 | /* Read the adapter's DSN to use as the eswitch switch_id */ |
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
15147 | } |
15148 | |
15149 | /* MTU range: 60 - FW defined max */ |
15150 | dev->min_mtu = ETH_ZLEN; |
15151 | dev->max_mtu = bp->max_mtu; |
15152 | |
	rc = bnxt_probe_phy(bp, true);
15154 | if (rc) |
15155 | goto init_err_pci_clean; |
15156 | |
15157 | hw_resc = &bp->hw_resc; |
15158 | bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + |
15159 | BNXT_L2_FLTR_MAX_FLTR; |
15160 | /* Older firmware may not report these filters properly */ |
15161 | if (bp->max_fltr < BNXT_MAX_FLTR) |
15162 | bp->max_fltr = BNXT_MAX_FLTR; |
15163 | bnxt_init_l2_fltr_tbl(bp); |
	bnxt_set_rx_skb_mode(bp, false);
15165 | bnxt_set_tpa_flags(bp); |
15166 | bnxt_set_ring_params(bp); |
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
15173 | rc = -ENOMEM; |
15174 | } |
15175 | goto init_err_pci_clean; |
15176 | } |
15177 | |
15178 | bnxt_fw_init_one_p3(bp); |
15179 | |
15180 | bnxt_init_dflt_coal(bp); |
15181 | |
15182 | if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
15183 | bp->flags |= BNXT_FLAG_STRIP_VLAN; |
15184 | |
15185 | rc = bnxt_init_int_mode(bp); |
15186 | if (rc) |
15187 | goto init_err_pci_clean; |
15188 | |
15189 | /* No TC has been set yet and rings may have been trimmed due to |
15190 | * limited MSIX, so we re-initialize the TX rings per TC. |
15191 | */ |
15192 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
15193 | |
15194 | if (BNXT_PF(bp)) { |
15195 | if (!bnxt_pf_wq) { |
15196 | bnxt_pf_wq = |
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
15200 | rc = -ENOMEM; |
15201 | goto init_err_pci_clean; |
15202 | } |
15203 | } |
15204 | rc = bnxt_init_tc(bp); |
15205 | if (rc) |
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
15207 | rc); |
15208 | } |
15209 | |
15210 | bnxt_inv_fw_health_reg(bp); |
15211 | rc = bnxt_dl_register(bp); |
15212 | if (rc) |
15213 | goto init_err_dl; |
15214 | |
	INIT_LIST_HEAD(&bp->usr_fltr_list);
15216 | |
15217 | rc = register_netdev(dev); |
15218 | if (rc) |
15219 | goto init_err_cleanup; |
15220 | |
15221 | bnxt_dl_fw_reporters_create(bp); |
15222 | |
15223 | bnxt_rdma_aux_device_init(bp); |
15224 | |
15225 | bnxt_print_device_info(bp); |
15226 | |
	pci_save_state(pdev);
15228 | |
15229 | return 0; |
15230 | init_err_cleanup: |
15231 | bnxt_dl_unregister(bp); |
15232 | init_err_dl: |
15233 | bnxt_shutdown_tc(bp); |
15234 | bnxt_clear_int_mode(bp); |
15235 | |
15236 | init_err_pci_clean: |
15237 | bnxt_hwrm_func_drv_unrgtr(bp); |
15238 | bnxt_free_hwrm_resources(bp); |
15239 | bnxt_hwmon_uninit(bp); |
15240 | bnxt_ethtool_free(bp); |
15241 | bnxt_ptp_clear(bp); |
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->rss_indir_tbl);
15249 | bp->rss_indir_tbl = NULL; |
15250 | |
15251 | init_err_free: |
15252 | free_netdev(dev); |
15253 | return rc; |
15254 | } |
15255 | |
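/* PCI shutdown callback: close the netdev if it is running, release
 * interrupts, and arm wake-on-LAN when the system is powering off.
 */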
15256 | static void bnxt_shutdown(struct pci_dev *pdev) |
15257 | { |
15258 | struct net_device *dev = pci_get_drvdata(pdev); |
15259 | struct bnxt *bp; |
15260 | |
15261 | if (!dev) |
15262 | return; |
15263 | |
15264 | rtnl_lock(); |
15265 | bp = netdev_priv(dev); |
15266 | if (!bp) |
15267 | goto shutdown_exit; |
15268 | |
15269 | if (netif_running(dev)) |
15270 | dev_close(dev); |
15271 | |
15272 | bnxt_clear_int_mode(bp); |
	pci_disable_device(pdev);
15274 | |
15275 | if (system_state == SYSTEM_POWER_OFF) { |
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
15278 | } |
15279 | |
15280 | shutdown_exit: |
15281 | rtnl_unlock(); |
15282 | } |
15283 | |
15284 | #ifdef CONFIG_PM_SLEEP |
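/* System suspend: stop ULP use of the device, close the netdev,
 * unregister from firmware and release context memory so the device
 * can be powered down cleanly.
 */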
15285 | static int bnxt_suspend(struct device *device) |
15286 | { |
	struct net_device *dev = dev_get_drvdata(device);
15288 | struct bnxt *bp = netdev_priv(dev); |
15289 | int rc = 0; |
15290 | |
15291 | rtnl_lock(); |
15292 | bnxt_ulp_stop(bp); |
15293 | if (netif_running(dev)) { |
15294 | netif_device_detach(dev); |
15295 | rc = bnxt_close(dev); |
15296 | } |
15297 | bnxt_hwrm_func_drv_unrgtr(bp); |
	pci_disable_device(bp->pdev);
15299 | bnxt_free_ctx_mem(bp); |
15300 | rtnl_unlock(); |
15301 | return rc; |
15302 | } |
15303 | |
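/* System resume: re-enable the device, re-establish firmware
 * communication and re-register the driver, then reopen the netdev if
 * it was running at suspend time.
 */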
15304 | static int bnxt_resume(struct device *device) |
15305 | { |
	struct net_device *dev = dev_get_drvdata(device);
15307 | struct bnxt *bp = netdev_priv(dev); |
15308 | int rc = 0; |
15309 | |
15310 | rtnl_lock(); |
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
15318 | if (bnxt_hwrm_ver_get(bp)) { |
15319 | rc = -ENODEV; |
15320 | goto resume_exit; |
15321 | } |
15322 | rc = bnxt_hwrm_func_reset(bp); |
15323 | if (rc) { |
15324 | rc = -EBUSY; |
15325 | goto resume_exit; |
15326 | } |
15327 | |
15328 | rc = bnxt_hwrm_func_qcaps(bp); |
15329 | if (rc) |
15330 | goto resume_exit; |
15331 | |
	bnxt_clear_reservations(bp, true);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
15335 | rc = -ENODEV; |
15336 | goto resume_exit; |
15337 | } |
15338 | |
15339 | bnxt_get_wol_settings(bp); |
15340 | if (netif_running(dev)) { |
15341 | rc = bnxt_open(dev); |
15342 | if (!rc) |
15343 | netif_device_attach(dev); |
15344 | } |
15345 | |
15346 | resume_exit: |
	bnxt_ulp_start(bp, rc);
15348 | if (!rc) |
15349 | bnxt_reenable_sriov(bp); |
15350 | rtnl_unlock(); |
15351 | return rc; |
15352 | } |
15353 | |
15354 | static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); |
15355 | #define BNXT_PM_OPS (&bnxt_pm_ops) |
15356 | |
15357 | #else |
15358 | |
15359 | #define BNXT_PM_OPS NULL |
15360 | |
15361 | #endif /* CONFIG_PM_SLEEP */ |
15362 | |
15363 | /** |
15364 | * bnxt_io_error_detected - called when PCI error is detected |
15365 | * @pdev: Pointer to PCI device |
15366 | * @state: The current pci connection state |
15367 | * |
15368 | * This function is called after a PCI bus error affecting |
15369 | * this device has been detected. |
15370 | */ |
15371 | static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, |
15372 | pci_channel_state_t state) |
15373 | { |
15374 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);
15381 | |
15382 | bnxt_ulp_stop(bp); |
15383 | |
15384 | if (state == pci_channel_io_perm_failure) { |
15385 | rtnl_unlock(); |
15386 | return PCI_ERS_RESULT_DISCONNECT; |
15387 | } |
15388 | |
15389 | if (state == pci_channel_io_frozen) |
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
15391 | |
	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
15397 | bnxt_free_ctx_mem(bp); |
15398 | rtnl_unlock(); |
15399 | |
	/* Request a slot reset. */
15401 | return PCI_ERS_RESULT_NEED_RESET; |
15402 | } |
15403 | |
15404 | /** |
15405 | * bnxt_io_slot_reset - called after the pci bus has been reset. |
15406 | * @pdev: Pointer to PCI device |
15407 | * |
15408 | * Restart the card from scratch, as if from a cold-boot. |
 * At this point, the card has experienced a hard reset,
15410 | * followed by fixups by BIOS, and has its config space |
15411 | * set up identically to what it was at cold boot. |
15412 | */ |
15413 | static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) |
15414 | { |
15415 | pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; |
15416 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct bnxt *bp = netdev_priv(netdev);
15418 | int retry = 0; |
15419 | int err = 0; |
15420 | int off; |
15421 | |
	netdev_info(bp->dev, "PCI Slot Reset\n");
15423 | |
15424 | rtnl_lock(); |
15425 | |
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon a fatal error, the device's internal logic that
		 * latches the BAR values is reset and is restored only when
		 * the BARs are rewritten.
		 *
		 * As pci_restore_state() does not re-write the BARs if the
		 * value matches the saved value, the driver writes the BARs
		 * to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);
15447 | |
15448 | bnxt_inv_fw_health_reg(bp); |
15449 | bnxt_try_map_fw_health_reg(bp); |
15450 | |
15451 | /* In some PCIe AER scenarios, firmware may take up to |
15452 | * 10 seconds to become ready in the worst case. |
15453 | */ |
15454 | do { |
15455 | err = bnxt_try_recover_fw(bp); |
15456 | if (!err) |
15457 | break; |
15458 | retry++; |
15459 | } while (retry < BNXT_FW_SLOT_RESET_RETRY); |
15460 | |
15461 | if (err) { |
			dev_err(&pdev->dev, "Firmware not ready\n");
15463 | goto reset_exit; |
15464 | } |
15465 | |
15466 | err = bnxt_hwrm_func_reset(bp); |
15467 | if (!err) |
15468 | result = PCI_ERS_RESULT_RECOVERED; |
15469 | |
15470 | bnxt_ulp_irq_stop(bp); |
15471 | bnxt_clear_int_mode(bp); |
15472 | err = bnxt_init_int_mode(bp); |
15473 | bnxt_ulp_irq_restart(bp, err); |
15474 | } |
15475 | |
15476 | reset_exit: |
	bnxt_clear_reservations(bp, true);
15478 | rtnl_unlock(); |
15479 | |
15480 | return result; |
15481 | } |
15482 | |
15483 | /** |
15484 | * bnxt_io_resume - called when traffic can start flowing again. |
15485 | * @pdev: Pointer to PCI device |
15486 | * |
15487 | * This callback is called when the error recovery driver tells |
 * us that it's OK to resume normal operation.
15489 | */ |
15490 | static void bnxt_io_resume(struct pci_dev *pdev) |
15491 | { |
15492 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct bnxt *bp = netdev_priv(netdev);
15494 | int err; |
15495 | |
	netdev_info(bp->dev, "PCI Slot Resume\n");
15497 | rtnl_lock(); |
15498 | |
15499 | err = bnxt_hwrm_func_qcaps(bp); |
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);
15502 | |
15503 | bnxt_ulp_start(bp, err); |
15504 | if (!err) { |
15505 | bnxt_reenable_sriov(bp); |
		netif_device_attach(netdev);
15507 | } |
15508 | |
15509 | rtnl_unlock(); |
15510 | } |
15511 | |
15512 | static const struct pci_error_handlers bnxt_err_handler = { |
15513 | .error_detected = bnxt_io_error_detected, |
15514 | .slot_reset = bnxt_io_slot_reset, |
15515 | .resume = bnxt_io_resume |
15516 | }; |
15517 | |
15518 | static struct pci_driver bnxt_pci_driver = { |
15519 | .name = DRV_MODULE_NAME, |
15520 | .id_table = bnxt_pci_tbl, |
15521 | .probe = bnxt_init_one, |
15522 | .remove = bnxt_remove_one, |
15523 | .shutdown = bnxt_shutdown, |
15524 | .driver.pm = BNXT_PM_OPS, |
15525 | .err_handler = &bnxt_err_handler, |
15526 | #if defined(CONFIG_BNXT_SRIOV) |
15527 | .sriov_configure = bnxt_sriov_configure, |
15528 | #endif |
15529 | }; |
15530 | |
15531 | static int __init bnxt_init(void) |
15532 | { |
15533 | int err; |
15534 | |
15535 | bnxt_debug_init(); |
15536 | err = pci_register_driver(&bnxt_pci_driver); |
15537 | if (err) { |
15538 | bnxt_debug_exit(); |
15539 | return err; |
15540 | } |
15541 | |
15542 | return 0; |
15543 | } |
15544 | |
15545 | static void __exit bnxt_exit(void) |
15546 | { |
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
15550 | bnxt_debug_exit(); |
15551 | } |
15552 | |
15553 | module_init(bnxt_init); |
15554 | module_exit(bnxt_exit); |
15555 | |