/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

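/* Completion ring doorbell flag combinations: REARM updates the consumer
 * index and re-enables the IRQ, DB_CP_FLAGS updates the index with the
 * IRQ left disabled, and IRQ_DIS_FLAGS only masks the IRQ.
 */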
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

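/* Doorbell helpers.  P5 chips have a separate 64-bit NQ doorbell; older
 * chips use the legacy completion ring doorbell for the same purpose.
 */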
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int idx)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)\n",
		   txr->txq_index, bnapi->tx_pkts,
		   txr->tx_cons, txr->tx_prod, idx);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

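/* TX BD length hint lookup table, indexed by packet length in 512-byte
 * units.
 */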
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

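/* Main transmit entry point.  Small packets on an otherwise empty ring
 * may be written directly through the push doorbell; everything else is
 * DMA mapped and described by one long TX BD plus one BD per fragment.
 */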
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
		 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

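		/* Push length in 64-bit words: the push BD header plus the
		 * packet data, rounded up.  The first 16 words go out as
		 * 64-bit copies; any remainder is written as 32-bit copies.
		 */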
		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_tcp_all_headers(skb);
		else
			hdr_len = skb_tcp_all_headers(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

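/* Reclaim completed TX descriptors: unmap the DMA buffers, hand PTP
 * timestamp skbs to the PTP worker, free the skbs, and wake the TX
 * queue if enough descriptors have been freed.
 */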
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int nr_pkts = bnapi->tx_pkts;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, i);
			return;
		}

		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	bnapi->tx_pkts = 0;
	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}

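/* Allocate an RX page (or a page fragment when the system page size is
 * larger than BNXT_RX_PAGE_SIZE) from the ring's page pool and return
 * its DMA address.
 */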
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

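/* Fill the RX BD at @prod with a newly allocated receive buffer: a page
 * pool page in page mode, otherwise a DMA-mapped page fragment.
 */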
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

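/* Recycle a consumed RX buffer by moving it from the consumer slot back
 * to the current producer slot, reusing its existing DMA mapping.
 */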
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

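/* Return aggregation buffers to the ring after an error or an aborted
 * TPA session, recycling each page into the next free producer slot.
 */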
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

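/* Attach the aggregation pages of a completed packet as frags of the
 * skb_shared_info and replenish the aggregation ring.  On allocation
 * failure, the remaining buffers are recycled and 0 is returned.
 */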
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

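/* Copy a received packet into a freshly allocated skb so the original
 * RX buffer can stay on the ring.  The buffer is only synced up to
 * rx_copy_thresh bytes, so callers must limit this path to small
 * packets.
 */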
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

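/* Handle a TPA_START completion: save the aggregation state in
 * tpa_info, swap the TPA buffer into the producer slot, and record the
 * RSS hash and GSO type for the eventual TPA_END.
 */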
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset_rxr(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

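/* Fix up an aggregated packet for GRO on 5731X chips: derive the
 * network and transport header offsets from the TPA hdr_info fields and
 * recompute the TCP pseudo-header checksum.
 */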
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

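/* Fix up an aggregated packet for GRO on 5750X (P5) chips: set the
 * network and transport headers from the TPA hdr_info offsets.  Unlike
 * the 5730X/5731X paths, no TCP pseudo-header checksum rewrite is done
 * here.
 */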
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

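/* Fix up an aggregated packet for GRO on 5730X chips: the header
 * offsets are derived from the payload offset in the TPA_END completion
 * and the fixed IPv4/IPv6 plus TCP header sizes.
 */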
1504 | static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, |
1505 | int payload_off, int tcp_ts, |
1506 | struct sk_buff *skb) |
1507 | { |
1508 | #ifdef CONFIG_INET |
1509 | struct tcphdr *th; |
1510 | int len, nw_off, tcp_opt_len = 0; |
1511 | |
1512 | if (tcp_ts) |
1513 | tcp_opt_len = 12; |
1514 | |
1515 | if (tpa_info->gso_type == SKB_GSO_TCPV4) { |
1516 | struct iphdr *iph; |
1517 | |
1518 | nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - |
1519 | ETH_HLEN; |
skb_set_network_header(skb, nw_off);
iph = ip_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1526 | } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { |
1527 | struct ipv6hdr *iph; |
1528 | |
1529 | nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - |
1530 | ETH_HLEN; |
skb_set_network_header(skb, nw_off);
iph = ipv6_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1537 | } else { |
1538 | dev_kfree_skb_any(skb); |
1539 | return NULL; |
1540 | } |
1541 | |
1542 | if (nw_off) /* tunnel */ |
bnxt_gro_tunnel(skb, skb->protocol);
1544 | #endif |
1545 | return skb; |
1546 | } |
1547 | |
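/* Finish GRO processing of an aggregated TPA packet: set the segment
 * count and gso_size, then let the chip-specific gro_func fix up the
 * headers before handing the skb to tcp_gro_complete().
 */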
1548 | static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, |
1549 | struct bnxt_tpa_info *tpa_info, |
1550 | struct rx_tpa_end_cmp *tpa_end, |
1551 | struct rx_tpa_end_cmp_ext *tpa_end1, |
1552 | struct sk_buff *skb) |
1553 | { |
1554 | #ifdef CONFIG_INET |
1555 | int payload_off; |
1556 | u16 segs; |
1557 | |
1558 | segs = TPA_END_TPA_SEGS(tpa_end); |
1559 | if (segs == 1) |
1560 | return skb; |
1561 | |
1562 | NAPI_GRO_CB(skb)->count = segs; |
1563 | skb_shinfo(skb)->gso_size = |
1564 | le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); |
1565 | skb_shinfo(skb)->gso_type = tpa_info->gso_type; |
1566 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
1567 | payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); |
1568 | else |
1569 | payload_off = TPA_END_PAYLOAD_OFF(tpa_end); |
1570 | skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); |
1571 | if (likely(skb)) |
1572 | tcp_gro_complete(skb); |
1573 | #endif |
1574 | return skb; |
1575 | } |
1576 | |
1577 | /* Given the cfa_code of a received packet determine which |
1578 | * netdev (vf-rep or PF) the packet is destined to. |
1579 | */ |
1580 | static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) |
1581 | { |
1582 | struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); |
1583 | |
/* if vf-rep dev is NULL, the packet must belong to the PF */
1585 | return dev ? dev : bp->dev; |
1586 | } |
1587 | |
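/* Handle a TPA end completion: look up the bnxt_tpa_info for the
 * aggregation, build an skb from the TPA buffer (copying small
 * packets), attach any aggregation buffers, then apply VLAN, checksum
 * and GRO fixups.  Returns NULL if the packet was discarded, or an
 * ERR_PTR if the completion ring does not yet hold all the buffers.
 */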
1588 | static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, |
1589 | struct bnxt_cp_ring_info *cpr, |
1590 | u32 *raw_cons, |
1591 | struct rx_tpa_end_cmp *tpa_end, |
1592 | struct rx_tpa_end_cmp_ext *tpa_end1, |
1593 | u8 *event) |
1594 | { |
1595 | struct bnxt_napi *bnapi = cpr->bnapi; |
1596 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
1597 | u8 *data_ptr, agg_bufs; |
1598 | unsigned int len; |
1599 | struct bnxt_tpa_info *tpa_info; |
1600 | dma_addr_t mapping; |
1601 | struct sk_buff *skb; |
1602 | u16 idx = 0, agg_id; |
1603 | void *data; |
1604 | bool gro; |
1605 | |
1606 | if (unlikely(bnapi->in_reset)) { |
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

if (rc < 0)
return ERR_PTR(-EBUSY);
1611 | return NULL; |
1612 | } |
1613 | |
1614 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
1615 | agg_id = TPA_END_AGG_ID_P5(tpa_end); |
1616 | agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
1617 | agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); |
1618 | tpa_info = &rxr->rx_tpa[agg_id]; |
1619 | if (unlikely(agg_bufs != tpa_info->agg_count)) { |
netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
agg_bufs, tpa_info->agg_count);
1622 | agg_bufs = tpa_info->agg_count; |
1623 | } |
1624 | tpa_info->agg_count = 0; |
1625 | *event |= BNXT_AGG_EVENT; |
bnxt_free_agg_idx(rxr, agg_id);
1627 | idx = agg_id; |
1628 | gro = !!(bp->flags & BNXT_FLAG_GRO); |
1629 | } else { |
1630 | agg_id = TPA_END_AGG_ID(tpa_end); |
1631 | agg_bufs = TPA_END_AGG_BUFS(tpa_end); |
1632 | tpa_info = &rxr->rx_tpa[agg_id]; |
1633 | idx = RING_CMP(*raw_cons); |
1634 | if (agg_bufs) { |
1635 | if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) |
return ERR_PTR(-EBUSY);
1637 | |
1638 | *event |= BNXT_AGG_EVENT; |
1639 | idx = NEXT_CMP(idx); |
1640 | } |
1641 | gro = !!TPA_END_GRO(tpa_end); |
1642 | } |
1643 | data = tpa_info->data; |
1644 | data_ptr = tpa_info->data_ptr; |
1645 | prefetch(data_ptr); |
1646 | len = tpa_info->len; |
1647 | mapping = tpa_info->mapping; |
1648 | |
1649 | if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { |
1650 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1651 | if (agg_bufs > MAX_SKB_FRAGS) |
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
1654 | return NULL; |
1655 | } |
1656 | |
1657 | if (len <= bp->rx_copy_thresh) { |
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1659 | if (!skb) { |
1660 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1661 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1662 | return NULL; |
1663 | } |
1664 | } else { |
1665 | u8 *new_data; |
1666 | dma_addr_t new_mapping; |
1667 | |
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1669 | if (!new_data) { |
1670 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1671 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1672 | return NULL; |
1673 | } |
1674 | |
1675 | tpa_info->data = new_data; |
1676 | tpa_info->data_ptr = new_data + bp->rx_offset; |
1677 | tpa_info->mapping = new_mapping; |
1678 | |
skb = napi_build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
1683 | |
1684 | if (!skb) { |
skb_free_frag(data);
1686 | bnxt_abort_tpa(cpr, idx, agg_bufs); |
1687 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1688 | return NULL; |
1689 | } |
skb_reserve(skb, bp->rx_offset);
1691 | skb_put(skb, len); |
1692 | } |
1693 | |
1694 | if (agg_bufs) { |
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1696 | if (!skb) { |
1697 | /* Page reuse already handled by bnxt_rx_pages(). */ |
1698 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1699 | return NULL; |
1700 | } |
1701 | } |
1702 | |
1703 | skb->protocol = |
eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1705 | |
1706 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1708 | |
1709 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
1710 | (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { |
1711 | __be16 vlan_proto = htons(tpa_info->metadata >> |
1712 | RX_CMP_FLAGS2_METADATA_TPID_SFT); |
1713 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1714 | |
if (eth_type_vlan(vlan_proto)) {
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1717 | } else { |
1718 | dev_kfree_skb(skb); |
1719 | return NULL; |
1720 | } |
1721 | } |
1722 | |
1723 | skb_checksum_none_assert(skb); |
1724 | if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { |
1725 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1726 | skb->csum_level = |
1727 | (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; |
1728 | } |
1729 | |
1730 | if (gro) |
1731 | skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); |
1732 | |
1733 | return skb; |
1734 | } |
1735 | |
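/* Stash a TPA aggregation completion so it can be consumed when the
 * matching TPA end completion arrives.
 */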
1736 | static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
1737 | struct rx_agg_cmp *rx_agg) |
1738 | { |
1739 | u16 agg_id = TPA_AGG_AGG_ID(rx_agg); |
1740 | struct bnxt_tpa_info *tpa_info; |
1741 | |
1742 | agg_id = bnxt_lookup_agg_idx(rxr, agg_id); |
1743 | tpa_info = &rxr->rx_tpa[agg_id]; |
1744 | BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); |
1745 | tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; |
1746 | } |
1747 | |
1748 | static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, |
1749 | struct sk_buff *skb) |
1750 | { |
1751 | if (skb->dev != bp->dev) { |
1752 | /* this packet belongs to a vf-rep */ |
1753 | bnxt_vf_rep_rx(bp, skb); |
1754 | return; |
1755 | } |
skb_record_rx_queue(skb, bnapi->index);
skb_mark_for_recycle(skb);
napi_gro_receive(&bnapi->napi, skb);
1759 | } |
1760 | |
1761 | /* returns the following: |
1762 | * 1 - 1 packet successfully received |
1763 | * 0 - successful TPA_START, packet not completed yet |
1764 | * -EBUSY - completion ring does not have all the agg buffers yet |
1765 | * -ENOMEM - packet aborted due to out of memory |
1766 | * -EIO - packet aborted due to hw error indicated in BD |
1767 | */ |
1768 | static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
1769 | u32 *raw_cons, u8 *event) |
1770 | { |
1771 | struct bnxt_napi *bnapi = cpr->bnapi; |
1772 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
1773 | struct net_device *dev = bp->dev; |
1774 | struct rx_cmp *rxcmp; |
1775 | struct rx_cmp_ext *rxcmp1; |
1776 | u32 tmp_raw_cons = *raw_cons; |
1777 | u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); |
1778 | struct bnxt_sw_rx_bd *rx_buf; |
1779 | unsigned int len; |
1780 | u8 *data_ptr, agg_bufs, cmp_type; |
1781 | bool xdp_active = false; |
1782 | dma_addr_t dma_addr; |
1783 | struct sk_buff *skb; |
1784 | struct xdp_buff xdp; |
1785 | u32 flags, misc; |
1786 | void *data; |
1787 | int rc = 0; |
1788 | |
1789 | rxcmp = (struct rx_cmp *) |
1790 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
1791 | |
1792 | cmp_type = RX_CMP_TYPE(rxcmp); |
1793 | |
1794 | if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { |
bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1796 | goto next_rx_no_prod_no_len; |
1797 | } |
1798 | |
1799 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); |
1800 | cp_cons = RING_CMP(tmp_raw_cons); |
1801 | rxcmp1 = (struct rx_cmp_ext *) |
1802 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
1803 | |
1804 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
1805 | return -EBUSY; |
1806 | |
1807 | /* The valid test of the entry must be done first before |
1808 | * reading any further. |
1809 | */ |
1810 | dma_rmb(); |
1811 | prod = rxr->rx_prod; |
1812 | |
1813 | if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { |
bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
1816 | |
1817 | *event |= BNXT_RX_EVENT; |
1818 | goto next_rx_no_prod_no_len; |
1819 | |
1820 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);

if (IS_ERR(skb))
1826 | return -EBUSY; |
1827 | |
1828 | rc = -ENOMEM; |
1829 | if (likely(skb)) { |
1830 | bnxt_deliver_skb(bp, bnapi, skb); |
1831 | rc = 1; |
1832 | } |
1833 | *event |= BNXT_RX_EVENT; |
1834 | goto next_rx_no_prod_no_len; |
1835 | } |
1836 | |
1837 | cons = rxcmp->rx_cmp_opaque; |
1838 | if (unlikely(cons != rxr->rx_next_cons)) { |
int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1840 | |
1841 | /* 0xffff is forced error, don't print it */ |
1842 | if (rxr->rx_next_cons != 0xffff) |
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
1845 | bnxt_sched_reset_rxr(bp, rxr); |
1846 | if (rc1) |
1847 | return rc1; |
1848 | goto next_rx_no_prod_no_len; |
1849 | } |
1850 | rx_buf = &rxr->rx_buf_ring[cons]; |
1851 | data = rx_buf->data; |
1852 | data_ptr = rx_buf->data_ptr; |
1853 | prefetch(data_ptr); |
1854 | |
1855 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); |
1856 | agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; |
1857 | |
1858 | if (agg_bufs) { |
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1860 | return -EBUSY; |
1861 | |
1862 | cp_cons = NEXT_CMP(cp_cons); |
1863 | *event |= BNXT_AGG_EVENT; |
1864 | } |
1865 | *event |= BNXT_RX_EVENT; |
1866 | |
1867 | rx_buf->data = NULL; |
1868 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { |
1869 | u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); |
1870 | |
1871 | bnxt_reuse_rx_data(rxr, cons, data); |
1872 | if (agg_bufs) |
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
false);
1875 | |
1876 | rc = -EIO; |
1877 | if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { |
1878 | bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; |
1879 | if (!(bp->flags & BNXT_FLAG_CHIP_P5) && |
1880 | !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { |
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
1883 | bnxt_sched_reset_rxr(bp, rxr); |
1884 | } |
1885 | } |
1886 | goto next_rx_no_len; |
1887 | } |
1888 | |
1889 | flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); |
1890 | len = flags >> RX_CMP_LEN_SHIFT; |
1891 | dma_addr = rx_buf->mapping; |
1892 | |
1893 | if (bnxt_xdp_attached(bp, rxr)) { |
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
1899 | if (!frag_len) { |
1900 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1901 | rc = -ENOMEM; |
1902 | goto next_rx; |
1903 | } |
1904 | } |
1905 | xdp_active = true; |
1906 | } |
1907 | |
1908 | if (xdp_active) { |
if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
1910 | rc = 1; |
1911 | goto next_rx; |
1912 | } |
1913 | } |
1914 | |
1915 | if (len <= bp->rx_copy_thresh) { |
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1917 | bnxt_reuse_rx_data(rxr, cons, data); |
1918 | if (!skb) { |
1919 | if (agg_bufs) { |
1920 | if (!xdp_active) |
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
agg_bufs, false);
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
1925 | } |
1926 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1927 | rc = -ENOMEM; |
1928 | goto next_rx; |
1929 | } |
1930 | } else { |
1931 | u32 payload; |
1932 | |
1933 | if (rx_buf->data_ptr == data_ptr) |
1934 | payload = misc & RX_CMP_PAYLOAD_OFFSET; |
1935 | else |
1936 | payload = 0; |
1937 | skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, |
1938 | payload | len); |
1939 | if (!skb) { |
1940 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1941 | rc = -ENOMEM; |
1942 | goto next_rx; |
1943 | } |
1944 | } |
1945 | |
1946 | if (agg_bufs) { |
1947 | if (!xdp_active) { |
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1949 | if (!skb) { |
1950 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1951 | rc = -ENOMEM; |
1952 | goto next_rx; |
1953 | } |
1954 | } else { |
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
1959 | cpr->sw_stats.rx.rx_oom_discards += 1; |
1960 | rc = -ENOMEM; |
1961 | goto next_rx; |
1962 | } |
1963 | } |
1964 | } |
1965 | |
1966 | if (RX_CMP_HASH_VALID(rxcmp)) { |
1967 | u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); |
1968 | enum pkt_hash_types type = PKT_HASH_TYPE_L4; |
1969 | |
1970 | /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ |
1971 | if (hash_type != 1 && hash_type != 3) |
1972 | type = PKT_HASH_TYPE_L3; |
1973 | skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); |
1974 | } |
1975 | |
1976 | cfa_code = RX_CMP_CFA_CODE(rxcmp1); |
skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1978 | |
1979 | if ((rxcmp1->rx_cmp_flags2 & |
1980 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
1981 | (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { |
1982 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
1983 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1984 | __be16 vlan_proto = htons(meta_data >> |
1985 | RX_CMP_FLAGS2_METADATA_TPID_SFT); |
1986 | |
if (eth_type_vlan(vlan_proto)) {
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1989 | } else { |
1990 | dev_kfree_skb(skb); |
1991 | goto next_rx; |
1992 | } |
1993 | } |
1994 | |
1995 | skb_checksum_none_assert(skb); |
1996 | if (RX_CMP_L4_CS_OK(rxcmp1)) { |
1997 | if (dev->features & NETIF_F_RXCSUM) { |
1998 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1999 | skb->csum_level = RX_CMP_ENCAP(rxcmp1); |
2000 | } |
2001 | } else { |
2002 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { |
2003 | if (dev->features & NETIF_F_RXCSUM) |
2004 | bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; |
2005 | } |
2006 | } |
2007 | |
2008 | if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == |
2009 | RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { |
2010 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
2011 | u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); |
2012 | u64 ns, ts; |
2013 | |
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
2020 | memset(skb_hwtstamps(skb), 0, |
2021 | sizeof(*skb_hwtstamps(skb))); |
2022 | skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); |
2023 | } |
2024 | } |
2025 | } |
2026 | bnxt_deliver_skb(bp, bnapi, skb); |
2027 | rc = 1; |
2028 | |
2029 | next_rx: |
2030 | cpr->rx_packets += 1; |
2031 | cpr->rx_bytes += len; |
2032 | |
2033 | next_rx_no_len: |
2034 | rxr->rx_prod = NEXT_RX(prod); |
2035 | rxr->rx_next_cons = NEXT_RX(cons); |
2036 | |
2037 | next_rx_no_prod_no_len: |
2038 | *raw_cons = tmp_raw_cons; |
2039 | |
2040 | return rc; |
2041 | } |
2042 | |
2043 | /* In netpoll mode, if we are using a combined completion ring, we need to |
2044 | * discard the rx packets and recycle the buffers. |
2045 | */ |
2046 | static int bnxt_force_rx_discard(struct bnxt *bp, |
2047 | struct bnxt_cp_ring_info *cpr, |
2048 | u32 *raw_cons, u8 *event) |
2049 | { |
2050 | u32 tmp_raw_cons = *raw_cons; |
2051 | struct rx_cmp_ext *rxcmp1; |
2052 | struct rx_cmp *rxcmp; |
2053 | u16 cp_cons; |
2054 | u8 cmp_type; |
2055 | int rc; |
2056 | |
2057 | cp_cons = RING_CMP(tmp_raw_cons); |
2058 | rxcmp = (struct rx_cmp *) |
2059 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2060 | |
2061 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); |
2062 | cp_cons = RING_CMP(tmp_raw_cons); |
2063 | rxcmp1 = (struct rx_cmp_ext *) |
2064 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2065 | |
2066 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
2067 | return -EBUSY; |
2068 | |
2069 | /* The valid test of the entry must be done first before |
2070 | * reading any further. |
2071 | */ |
2072 | dma_rmb(); |
2073 | cmp_type = RX_CMP_TYPE(rxcmp); |
2074 | if (cmp_type == CMP_TYPE_RX_L2_CMP) { |
2075 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
2076 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); |
2077 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { |
2078 | struct rx_tpa_end_cmp_ext *tpa_end1; |
2079 | |
2080 | tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; |
2081 | tpa_end1->rx_tpa_end_cmp_errors_v2 |= |
2082 | cpu_to_le32(RX_TPA_END_CMP_ERRORS); |
2083 | } |
2084 | rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); |
2085 | if (rc && rc != -EBUSY) |
2086 | cpr->sw_stats.rx.rx_netpoll_discards += 1; |
2087 | return rc; |
2088 | } |
2089 | |
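/* Read a firmware health register.  The register descriptor encodes
 * both the address space (PCI config, GRC window, BAR0 or BAR1) and
 * the offset within that space.
 */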
2090 | u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) |
2091 | { |
2092 | struct bnxt_fw_health *fw_health = bp->fw_health; |
2093 | u32 reg = fw_health->regs[reg_idx]; |
2094 | u32 reg_type, reg_off, val = 0; |
2095 | |
2096 | reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
2097 | reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
2098 | switch (reg_type) { |
2099 | case BNXT_FW_HEALTH_REG_TYPE_CFG: |
pci_read_config_dword(bp->pdev, reg_off, &val);
2101 | break; |
2102 | case BNXT_FW_HEALTH_REG_TYPE_GRC: |
2103 | reg_off = fw_health->mapped_regs[reg_idx]; |
2104 | fallthrough; |
2105 | case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
val = readl(bp->bar0 + reg_off);
2107 | break; |
2108 | case BNXT_FW_HEALTH_REG_TYPE_BAR1: |
val = readl(bp->bar1 + reg_off);
2110 | break; |
2111 | } |
2112 | if (reg_idx == BNXT_FW_RESET_INPROG_REG) |
2113 | val &= fw_health->fw_reset_inprog_reg_mask; |
2114 | return val; |
2115 | } |
2116 | |
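/* Map a firmware aggregation ring id back to its ring group index,
 * or INVALID_HW_RING_ID if no RX ring owns that aggregation ring.
 */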
2117 | static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) |
2118 | { |
2119 | int i; |
2120 | |
2121 | for (i = 0; i < bp->rx_nr_rings; i++) { |
2122 | u16 grp_idx = bp->rx_ring[i].bnapi->index; |
2123 | struct bnxt_ring_grp_info *grp_info; |
2124 | |
2125 | grp_info = &bp->grp_info[grp_idx]; |
2126 | if (grp_info->agg_fw_ring_id == ring_id) |
2127 | return grp_idx; |
2128 | } |
2129 | return INVALID_HW_RING_ID; |
2130 | } |
2131 | |
2132 | static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) |
2133 | { |
2134 | if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) |
2135 | return link_info->force_pam4_link_speed; |
2136 | return link_info->force_link_speed; |
2137 | } |
2138 | |
2139 | static void bnxt_set_force_speed(struct bnxt_link_info *link_info) |
2140 | { |
2141 | link_info->req_link_speed = link_info->force_link_speed; |
2142 | link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; |
2143 | if (link_info->force_pam4_link_speed) { |
2144 | link_info->req_link_speed = link_info->force_pam4_link_speed; |
2145 | link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; |
2146 | } |
2147 | } |
2148 | |
2149 | static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) |
2150 | { |
2151 | link_info->advertising = link_info->auto_link_speeds; |
2152 | link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; |
2153 | } |
2154 | |
2155 | static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) |
2156 | { |
2157 | if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && |
2158 | link_info->req_link_speed != link_info->force_link_speed) |
2159 | return true; |
2160 | if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && |
2161 | link_info->req_link_speed != link_info->force_pam4_link_speed) |
2162 | return true; |
2163 | return false; |
2164 | } |
2165 | |
2166 | static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) |
2167 | { |
2168 | if (link_info->advertising != link_info->auto_link_speeds || |
2169 | link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) |
2170 | return true; |
2171 | return false; |
2172 | } |
2173 | |
2174 | #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ |
2175 | ((data2) & \ |
2176 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) |
2177 | |
2178 | #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ |
2179 | (((data2) & \ |
2180 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ |
2181 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) |
2182 | |
2183 | #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ |
2184 | ((data1) & \ |
2185 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) |
2186 | |
2187 | #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ |
2188 | (((data1) & \ |
2189 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ |
2190 | ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) |
2191 | |
2192 | /* Return true if the workqueue has to be scheduled */ |
2193 | static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) |
2194 | { |
2195 | u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); |
2196 | |
2197 | switch (err_type) { |
2198 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: |
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2201 | break; |
2202 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: |
netdev_warn(bp->dev, "Pause Storm detected!\n");
2204 | break; |
2205 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: |
netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2207 | break; |
2208 | case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { |
2209 | u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); |
2210 | char *threshold_type; |
2211 | bool notify = false; |
2212 | char *dir_str; |
2213 | |
2214 | switch (type) { |
2215 | case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: |
threshold_type = "warning";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
threshold_type = "critical";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
threshold_type = "fatal";
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
threshold_type = "shutdown";
break;
default:
netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
2229 | return false; |
2230 | } |
2231 | if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { |
dir_str = "above";
notify = true;
} else {
dir_str = "below";
}
netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
dir_str, threshold_type);
netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
2242 | if (notify) { |
2243 | bp->thermal_threshold_type = type; |
set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
2245 | return true; |
2246 | } |
2247 | return false; |
2248 | } |
2249 | default: |
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
2252 | break; |
2253 | } |
2254 | return false; |
2255 | } |
2256 | |
2257 | #define BNXT_GET_EVENT_PORT(data) \ |
2258 | ((data) & \ |
2259 | ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) |
2260 | |
2261 | #define BNXT_EVENT_RING_TYPE(data2) \ |
2262 | ((data2) & \ |
2263 | ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) |
2264 | |
2265 | #define BNXT_EVENT_RING_TYPE_RX(data2) \ |
2266 | (BNXT_EVENT_RING_TYPE(data2) == \ |
2267 | ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) |
2268 | |
2269 | #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ |
2270 | (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ |
2271 | ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) |
2272 | |
2273 | #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ |
2274 | (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ |
2275 | ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) |
2276 | |
2277 | #define BNXT_PHC_BITS 48 |
2278 | |
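/* Decode a firmware async event completion, setting the appropriate
 * sp_event bit(s) for the service task and handling a few events
 * (PHC updates, echo requests, etc.) inline.
 */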
2279 | static int bnxt_async_event_process(struct bnxt *bp, |
2280 | struct hwrm_async_event_cmpl *cmpl) |
2281 | { |
2282 | u16 event_id = le16_to_cpu(cmpl->event_id); |
2283 | u32 data1 = le32_to_cpu(cmpl->event_data1); |
2284 | u32 data2 = le32_to_cpu(cmpl->event_data2); |
2285 | |
netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2287 | event_id, data1, data2); |
2288 | |
2289 | /* TODO CHIMP_FW: Define event id's for link change, error etc */ |
2290 | switch (event_id) { |
2291 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { |
2292 | struct bnxt_link_info *link_info = &bp->link_info; |
2293 | |
2294 | if (BNXT_VF(bp)) |
2295 | goto async_event_process_exit; |
2296 | |
2297 | /* print unsupported speed warning in forced speed mode only */ |
2298 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && |
2299 | (data1 & 0x20000)) { |
2300 | u16 fw_speed = bnxt_get_force_speed(link_info); |
2301 | u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); |
2302 | |
2303 | if (speed != SPEED_UNKNOWN) |
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2305 | speed); |
2306 | } |
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2308 | } |
2309 | fallthrough; |
2310 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: |
2311 | case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: |
set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2313 | fallthrough; |
2314 | case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: |
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2316 | break; |
2317 | case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: |
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2319 | break; |
2320 | case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { |
2321 | u16 port_id = BNXT_GET_EVENT_PORT(data1); |
2322 | |
2323 | if (BNXT_VF(bp)) |
2324 | break; |
2325 | |
2326 | if (bp->pf.port_id != port_id) |
2327 | break; |
2328 | |
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2330 | break; |
2331 | } |
2332 | case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: |
2333 | if (BNXT_PF(bp)) |
2334 | goto async_event_process_exit; |
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2336 | break; |
2337 | case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { |
char *type_str = "Solicited";
2339 | |
2340 | if (!bp->fw_health) |
2341 | goto async_event_process_exit; |
2342 | |
2343 | bp->fw_reset_timestamp = jiffies; |
2344 | bp->fw_reset_min_dsecs = cmpl->timestamp_lo; |
2345 | if (!bp->fw_reset_min_dsecs) |
2346 | bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; |
2347 | bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); |
2348 | if (!bp->fw_reset_max_dsecs) |
2349 | bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; |
2350 | if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { |
set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
type_str = "Fatal";
bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
type_str = "Non-fatal";
bp->fw_health->survivals++;
set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2361 | } |
netif_warn(bp, hw, bp->dev,
"%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2368 | break; |
2369 | } |
2370 | case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { |
2371 | struct bnxt_fw_health *fw_health = bp->fw_health; |
char *status_desc = "healthy";
2373 | u32 status; |
2374 | |
2375 | if (!fw_health) |
2376 | goto async_event_process_exit; |
2377 | |
2378 | if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { |
2379 | fw_health->enabled = false; |
netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2381 | break; |
2382 | } |
2383 | fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); |
2384 | fw_health->tmr_multiplier = |
2385 | DIV_ROUND_UP(fw_health->polling_dsecs * HZ, |
2386 | bp->current_interval * 10); |
2387 | fw_health->tmr_counter = fw_health->tmr_multiplier; |
2388 | if (!fw_health->enabled) |
2389 | fw_health->last_fw_heartbeat = |
2390 | bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
2391 | fw_health->last_fw_reset_cnt = |
2392 | bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
2393 | status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
2394 | if (status != BNXT_FW_STATUS_HEALTHY) |
status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
"Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
fw_health->primary ? "primary" : "backup", status,
2399 | status_desc, fw_health->last_fw_reset_cnt); |
2400 | if (!fw_health->enabled) { |
2401 | /* Make sure tmr_counter is set and visible to |
2402 | * bnxt_health_check() before setting enabled to true. |
2403 | */ |
2404 | smp_wmb(); |
2405 | fw_health->enabled = true; |
2406 | } |
2407 | goto async_event_process_exit; |
2408 | } |
2409 | case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: |
netif_notice(bp, hw, bp->dev,
"Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
data1, data2);
2413 | goto async_event_process_exit; |
2414 | case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { |
2415 | struct bnxt_rx_ring_info *rxr; |
2416 | u16 grp_idx; |
2417 | |
2418 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
2419 | goto async_event_process_exit; |
2420 | |
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
BNXT_EVENT_RING_TYPE(data2), data1);
2423 | if (!BNXT_EVENT_RING_TYPE_RX(data2)) |
2424 | goto async_event_process_exit; |
2425 | |
grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2427 | if (grp_idx == INVALID_HW_RING_ID) { |
netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
data1);
2430 | goto async_event_process_exit; |
2431 | } |
2432 | rxr = bp->bnapi[grp_idx]->rx_ring; |
2433 | bnxt_sched_reset_rxr(bp, rxr); |
2434 | goto async_event_process_exit; |
2435 | } |
2436 | case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { |
2437 | struct bnxt_fw_health *fw_health = bp->fw_health; |
2438 | |
netif_notice(bp, hw, bp->dev,
"Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
data1, data2);
2442 | if (fw_health) { |
2443 | fw_health->echo_req_data1 = data1; |
2444 | fw_health->echo_req_data2 = data2; |
set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2446 | break; |
2447 | } |
2448 | goto async_event_process_exit; |
2449 | } |
2450 | case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { |
2451 | bnxt_ptp_pps_event(bp, data1, data2); |
2452 | goto async_event_process_exit; |
2453 | } |
2454 | case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { |
2455 | if (bnxt_event_error_report(bp, data1, data2)) |
2456 | break; |
2457 | goto async_event_process_exit; |
2458 | } |
2459 | case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { |
2460 | switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { |
2461 | case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: |
2462 | if (BNXT_PTP_USE_RTC(bp)) { |
2463 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
2464 | u64 ns; |
2465 | |
2466 | if (!ptp) |
2467 | goto async_event_process_exit; |
2468 | |
spin_lock_bh(&ptp->ptp_lock);
2470 | bnxt_ptp_update_current_time(bp); |
2471 | ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << |
2472 | BNXT_PHC_BITS) | ptp->current_time); |
2473 | bnxt_ptp_rtc_timecounter_init(ptp, ns); |
spin_unlock_bh(&ptp->ptp_lock);
2475 | } |
2476 | break; |
2477 | } |
2478 | goto async_event_process_exit; |
2479 | } |
2480 | case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { |
2481 | u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; |
2482 | |
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2484 | goto async_event_process_exit; |
2485 | } |
2486 | default: |
2487 | goto async_event_process_exit; |
2488 | } |
2489 | __bnxt_queue_sp_work(bp); |
2490 | async_event_process_exit: |
2491 | return 0; |
2492 | } |
2493 | |
2494 | static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) |
2495 | { |
2496 | u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; |
2497 | struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; |
2498 | struct hwrm_fwd_req_cmpl *fwd_req_cmpl = |
2499 | (struct hwrm_fwd_req_cmpl *)txcmp; |
2500 | |
2501 | switch (cmpl_type) { |
2502 | case CMPL_BASE_TYPE_HWRM_DONE: |
2503 | seq_id = le16_to_cpu(h_cmpl->sequence_id); |
hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2505 | break; |
2506 | |
2507 | case CMPL_BASE_TYPE_HWRM_FWD_REQ: |
2508 | vf_id = le16_to_cpu(fwd_req_cmpl->source_id); |
2509 | |
2510 | if ((vf_id < bp->pf.first_vf_id) || |
2511 | (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { |
netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
vf_id);
2514 | return -EINVAL; |
2515 | } |
2516 | |
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2518 | bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); |
2519 | break; |
2520 | |
2521 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: |
bnxt_async_event_process(bp,
(struct hwrm_async_event_cmpl *)txcmp);
2524 | break; |
2525 | |
2526 | default: |
2527 | break; |
2528 | } |
2529 | |
2530 | return 0; |
2531 | } |
2532 | |
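/* MSI-X handler: all real work is deferred to NAPI, so just count the
 * event and schedule the poll.
 */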
2533 | static irqreturn_t bnxt_msix(int irq, void *dev_instance) |
2534 | { |
2535 | struct bnxt_napi *bnapi = dev_instance; |
2536 | struct bnxt *bp = bnapi->bp; |
2537 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2538 | u32 cons = RING_CMP(cpr->cp_raw_cons); |
2539 | |
2540 | cpr->event_ctr++; |
2541 | prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); |
napi_schedule(&bnapi->napi);
2543 | return IRQ_HANDLED; |
2544 | } |
2545 | |
2546 | static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) |
2547 | { |
2548 | u32 raw_cons = cpr->cp_raw_cons; |
2549 | u16 cons = RING_CMP(raw_cons); |
2550 | struct tx_cmp *txcmp; |
2551 | |
2552 | txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
2553 | |
2554 | return TX_CMP_VALID(txcmp, raw_cons); |
2555 | } |
2556 | |
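/* Legacy INTx handler.  The interrupt may be shared with another
 * device, so verify that this ring has work (or a latched status bit)
 * before scheduling NAPI.
 */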
2557 | static irqreturn_t bnxt_inta(int irq, void *dev_instance) |
2558 | { |
2559 | struct bnxt_napi *bnapi = dev_instance; |
2560 | struct bnxt *bp = bnapi->bp; |
2561 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2562 | u32 cons = RING_CMP(cpr->cp_raw_cons); |
2563 | u32 int_status; |
2564 | |
2565 | prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); |
2566 | |
2567 | if (!bnxt_has_work(bp, cpr)) { |
int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2569 | /* return if erroneous interrupt */ |
2570 | if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) |
2571 | return IRQ_NONE; |
2572 | } |
2573 | |
2574 | /* disable ring IRQ */ |
2575 | BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); |
2576 | |
2577 | /* Return here if interrupt is shared and is disabled. */ |
2578 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2579 | return IRQ_HANDLED; |
2580 | |
napi_schedule(&bnapi->napi);
2582 | return IRQ_HANDLED; |
2583 | } |
2584 | |
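/* Core completion ring poll loop: consume TX, RX and HWRM completions
 * until the budget is exhausted or the ring is empty.  With a zero
 * budget (e.g. netpoll), RX packets are discarded rather than
 * delivered.
 */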
2585 | static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
2586 | int budget) |
2587 | { |
2588 | struct bnxt_napi *bnapi = cpr->bnapi; |
2589 | u32 raw_cons = cpr->cp_raw_cons; |
2590 | u32 cons; |
2591 | int tx_pkts = 0; |
2592 | int rx_pkts = 0; |
2593 | u8 event = 0; |
2594 | struct tx_cmp *txcmp; |
2595 | |
2596 | cpr->has_more_work = 0; |
2597 | cpr->had_work_done = 1; |
2598 | while (1) { |
2599 | int rc; |
2600 | |
2601 | cons = RING_CMP(raw_cons); |
2602 | txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
2603 | |
2604 | if (!TX_CMP_VALID(txcmp, raw_cons)) |
2605 | break; |
2606 | |
2607 | /* The valid test of the entry must be done first before |
2608 | * reading any further. |
2609 | */ |
2610 | dma_rmb(); |
2611 | if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { |
2612 | tx_pkts++; |
2613 | /* return full budget so NAPI will complete. */ |
2614 | if (unlikely(tx_pkts >= bp->tx_wake_thresh)) { |
2615 | rx_pkts = budget; |
2616 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2617 | if (budget) |
2618 | cpr->has_more_work = 1; |
2619 | break; |
2620 | } |
2621 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
2622 | if (likely(budget)) |
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
2627 | if (likely(rc >= 0)) |
2628 | rx_pkts += rc; |
2629 | /* Increment rx_pkts when rc is -ENOMEM to count towards |
2630 | * the NAPI budget. Otherwise, we may potentially loop |
2631 | * here forever if we consistently cannot allocate |
2632 | * buffers. |
2633 | */ |
2634 | else if (rc == -ENOMEM && budget) |
2635 | rx_pkts++; |
2636 | else if (rc == -EBUSY) /* partial completion */ |
2637 | break; |
2638 | } else if (unlikely((TX_CMP_TYPE(txcmp) == |
2639 | CMPL_BASE_TYPE_HWRM_DONE) || |
2640 | (TX_CMP_TYPE(txcmp) == |
2641 | CMPL_BASE_TYPE_HWRM_FWD_REQ) || |
2642 | (TX_CMP_TYPE(txcmp) == |
2643 | CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { |
2644 | bnxt_hwrm_handler(bp, txcmp); |
2645 | } |
2646 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2647 | |
2648 | if (rx_pkts && rx_pkts == budget) { |
2649 | cpr->has_more_work = 1; |
2650 | break; |
2651 | } |
2652 | } |
2653 | |
2654 | if (event & BNXT_REDIRECT_EVENT) |
2655 | xdp_do_flush(); |
2656 | |
2657 | if (event & BNXT_TX_EVENT) { |
2658 | struct bnxt_tx_ring_info *txr = bnapi->tx_ring; |
2659 | u16 prod = txr->tx_prod; |
2660 | |
2661 | /* Sync BD data before updating doorbell */ |
2662 | wmb(); |
2663 | |
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2665 | } |
2666 | |
2667 | cpr->cp_raw_cons = raw_cons; |
2668 | bnapi->tx_pkts += tx_pkts; |
2669 | bnapi->events |= event; |
2670 | return rx_pkts; |
2671 | } |
2672 | |
2673 | static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, |
2674 | int budget) |
2675 | { |
2676 | if (bnapi->tx_pkts && !bnapi->tx_fault) |
2677 | bnapi->tx_int(bp, bnapi, budget); |
2678 | |
2679 | if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { |
2680 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2681 | |
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2683 | } |
2684 | if (bnapi->events & BNXT_AGG_EVENT) { |
2685 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2686 | |
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2688 | } |
2689 | bnapi->events = 0; |
2690 | } |
2691 | |
2692 | static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, |
2693 | int budget) |
2694 | { |
2695 | struct bnxt_napi *bnapi = cpr->bnapi; |
2696 | int rx_pkts; |
2697 | |
2698 | rx_pkts = __bnxt_poll_work(bp, cpr, budget); |
2699 | |
2700 | /* ACK completion ring before freeing tx ring and producing new |
2701 | * buffers in rx/agg rings to prevent overflowing the completion |
2702 | * ring. |
2703 | */ |
bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2705 | |
2706 | __bnxt_poll_work_done(bp, bnapi, budget); |
2707 | return rx_pkts; |
2708 | } |
2709 | |
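/* NAPI poll for the Nitro A0 special ring.  Each RX completion is
 * forced to an error state so that bnxt_rx_pkt() recycles the buffer
 * instead of delivering the packet.
 */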
2710 | static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) |
2711 | { |
2712 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
2713 | struct bnxt *bp = bnapi->bp; |
2714 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2715 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
2716 | struct tx_cmp *txcmp; |
2717 | struct rx_cmp_ext *rxcmp1; |
2718 | u32 cp_cons, tmp_raw_cons; |
2719 | u32 raw_cons = cpr->cp_raw_cons; |
2720 | bool flush_xdp = false; |
2721 | u32 rx_pkts = 0; |
2722 | u8 event = 0; |
2723 | |
2724 | while (1) { |
2725 | int rc; |
2726 | |
2727 | cp_cons = RING_CMP(raw_cons); |
2728 | txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2729 | |
2730 | if (!TX_CMP_VALID(txcmp, raw_cons)) |
2731 | break; |
2732 | |
2733 | /* The valid test of the entry must be done first before |
2734 | * reading any further. |
2735 | */ |
2736 | dma_rmb(); |
2737 | if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
2738 | tmp_raw_cons = NEXT_RAW_CMP(raw_cons); |
2739 | cp_cons = RING_CMP(tmp_raw_cons); |
2740 | rxcmp1 = (struct rx_cmp_ext *) |
2741 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; |
2742 | |
2743 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) |
2744 | break; |
2745 | |
2746 | /* force an error to recycle the buffer */ |
2747 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= |
2748 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); |
2749 | |
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2751 | if (likely(rc == -EIO) && budget) |
2752 | rx_pkts++; |
2753 | else if (rc == -EBUSY) /* partial completion */ |
2754 | break; |
2755 | if (event & BNXT_REDIRECT_EVENT) |
2756 | flush_xdp = true; |
2757 | } else if (unlikely(TX_CMP_TYPE(txcmp) == |
2758 | CMPL_BASE_TYPE_HWRM_DONE)) { |
2759 | bnxt_hwrm_handler(bp, txcmp); |
2760 | } else { |
netdev_err(bp->dev,
"Invalid completion received on special ring\n");
2763 | } |
2764 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2765 | |
2766 | if (rx_pkts == budget) |
2767 | break; |
2768 | } |
2769 | |
2770 | cpr->cp_raw_cons = raw_cons; |
2771 | BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); |
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2773 | |
2774 | if (event & BNXT_AGG_EVENT) |
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2776 | if (flush_xdp) |
2777 | xdp_do_flush(); |
2778 | |
2779 | if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { |
napi_complete_done(napi, rx_pkts);
2781 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
2782 | } |
2783 | return rx_pkts; |
2784 | } |
2785 | |
2786 | static int bnxt_poll(struct napi_struct *napi, int budget) |
2787 | { |
2788 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
2789 | struct bnxt *bp = bnapi->bp; |
2790 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2791 | int work_done = 0; |
2792 | |
2793 | if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { |
napi_complete(napi);
2795 | return 0; |
2796 | } |
2797 | while (1) { |
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2799 | |
2800 | if (work_done >= budget) { |
2801 | if (!budget) |
2802 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
2803 | break; |
2804 | } |
2805 | |
2806 | if (!bnxt_has_work(bp, cpr)) { |
if (napi_complete_done(napi, work_done))
2808 | BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); |
2809 | break; |
2810 | } |
2811 | } |
2812 | if (bp->flags & BNXT_FLAG_DIM) { |
2813 | struct dim_sample dim_sample = {}; |
2814 | |
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
net_dim(&cpr->dim, dim_sample);
2820 | } |
2821 | return work_done; |
2822 | } |
2823 | |
2824 | static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) |
2825 | { |
2826 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2827 | int i, work_done = 0; |
2828 | |
2829 | for (i = 0; i < 2; i++) { |
2830 | struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; |
2831 | |
2832 | if (cpr2) { |
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
2835 | cpr->has_more_work |= cpr2->has_more_work; |
2836 | } |
2837 | } |
2838 | return work_done; |
2839 | } |
2840 | |
2841 | static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, |
2842 | u64 dbr_type, int budget) |
2843 | { |
2844 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2845 | int i; |
2846 | |
2847 | for (i = 0; i < 2; i++) { |
2848 | struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; |
2849 | struct bnxt_db_info *db; |
2850 | |
2851 | if (cpr2 && cpr2->had_work_done) { |
2852 | db = &cpr2->cp_db; |
bnxt_writeq(bp, db->db_key64 | dbr_type |
RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2855 | cpr2->had_work_done = 0; |
2856 | } |
2857 | } |
2858 | __bnxt_poll_work_done(bp, bnapi, budget); |
2859 | } |
2860 | |
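/* NAPI poll for P5 chips: drain the notification queue (NQ) and poll
 * each completion ring referenced by a CQ notification entry.
 */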
2861 | static int bnxt_poll_p5(struct napi_struct *napi, int budget) |
2862 | { |
2863 | struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); |
2864 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
2865 | struct bnxt_cp_ring_info *cpr_rx; |
2866 | u32 raw_cons = cpr->cp_raw_cons; |
2867 | struct bnxt *bp = bnapi->bp; |
2868 | struct nqe_cn *nqcmp; |
2869 | int work_done = 0; |
2870 | u32 cons; |
2871 | |
2872 | if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { |
napi_complete(napi);
2874 | return 0; |
2875 | } |
2876 | if (cpr->has_more_work) { |
2877 | cpr->has_more_work = 0; |
2878 | work_done = __bnxt_poll_cqs(bp, bnapi, budget); |
2879 | } |
2880 | while (1) { |
2881 | cons = RING_CMP(raw_cons); |
2882 | nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; |
2883 | |
2884 | if (!NQ_CMP_VALID(nqcmp, raw_cons)) { |
2885 | if (cpr->has_more_work) |
2886 | break; |
2887 | |
2888 | __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, |
2889 | budget); |
2890 | cpr->cp_raw_cons = raw_cons; |
if (napi_complete_done(napi, work_done))
2892 | BNXT_DB_NQ_ARM_P5(&cpr->cp_db, |
2893 | cpr->cp_raw_cons); |
2894 | goto poll_done; |
2895 | } |
2896 | |
2897 | /* The valid test of the entry must be done first before |
2898 | * reading any further. |
2899 | */ |
2900 | dma_rmb(); |
2901 | |
2902 | if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { |
2903 | u32 idx = le32_to_cpu(nqcmp->cq_handle_low); |
2904 | struct bnxt_cp_ring_info *cpr2; |
2905 | |
2906 | /* No more budget for RX work */ |
2907 | if (budget && work_done >= budget && idx == BNXT_RX_HDL) |
2908 | break; |
2909 | |
2910 | cpr2 = cpr->cp_ring_arr[idx]; |
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
2913 | cpr->has_more_work |= cpr2->has_more_work; |
2914 | } else { |
bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2916 | } |
2917 | raw_cons = NEXT_RAW_CMP(raw_cons); |
2918 | } |
2919 | __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); |
2920 | if (raw_cons != cpr->cp_raw_cons) { |
2921 | cpr->cp_raw_cons = raw_cons; |
2922 | BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); |
2923 | } |
2924 | poll_done: |
2925 | cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL]; |
2926 | if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) { |
2927 | struct dim_sample dim_sample = {}; |
2928 | |
dim_update_sample(cpr->event_ctr,
cpr_rx->rx_packets,
cpr_rx->rx_bytes,
&dim_sample);
net_dim(&cpr->dim, dim_sample);
2934 | } |
2935 | return work_done; |
2936 | } |
2937 | |
2938 | static void bnxt_free_tx_skbs(struct bnxt *bp) |
2939 | { |
2940 | int i, max_idx; |
2941 | struct pci_dev *pdev = bp->pdev; |
2942 | |
2943 | if (!bp->tx_ring) |
2944 | return; |
2945 | |
2946 | max_idx = bp->tx_nr_pages * TX_DESC_CNT; |
2947 | for (i = 0; i < bp->tx_nr_rings; i++) { |
2948 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
2949 | int j; |
2950 | |
2951 | if (!txr->tx_buf_ring) |
2952 | continue; |
2953 | |
2954 | for (j = 0; j < max_idx;) { |
2955 | struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; |
2956 | struct sk_buff *skb; |
2957 | int k, last; |
2958 | |
2959 | if (i < bp->tx_nr_rings_xdp && |
2960 | tx_buf->action == XDP_REDIRECT) { |
2961 | dma_unmap_single(&pdev->dev, |
2962 | dma_unmap_addr(tx_buf, mapping), |
2963 | dma_unmap_len(tx_buf, len), |
2964 | DMA_TO_DEVICE); |
xdp_return_frame(tx_buf->xdpf);
2966 | tx_buf->action = 0; |
2967 | tx_buf->xdpf = NULL; |
2968 | j++; |
2969 | continue; |
2970 | } |
2971 | |
2972 | skb = tx_buf->skb; |
2973 | if (!skb) { |
2974 | j++; |
2975 | continue; |
2976 | } |
2977 | |
2978 | tx_buf->skb = NULL; |
2979 | |
2980 | if (tx_buf->is_push) { |
2981 | dev_kfree_skb(skb); |
2982 | j += 2; |
2983 | continue; |
2984 | } |
2985 | |
2986 | dma_unmap_single(&pdev->dev, |
2987 | dma_unmap_addr(tx_buf, mapping), |
2988 | skb_headlen(skb), |
2989 | DMA_TO_DEVICE); |
2990 | |
2991 | last = tx_buf->nr_frags; |
2992 | j += 2; |
2993 | for (k = 0; k < last; k++, j++) { |
2994 | int ring_idx = j & bp->tx_ring_mask; |
2995 | skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; |
2996 | |
2997 | tx_buf = &txr->tx_buf_ring[ring_idx]; |
2998 | dma_unmap_page( |
2999 | &pdev->dev, |
3000 | dma_unmap_addr(tx_buf, mapping), |
3001 | skb_frag_size(frag), DMA_TO_DEVICE); |
3002 | } |
3003 | dev_kfree_skb(skb); |
3004 | } |
netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
3006 | } |
3007 | } |
3008 | |
3009 | static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) |
3010 | { |
3011 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
3012 | struct pci_dev *pdev = bp->pdev; |
3013 | struct bnxt_tpa_idx_map *map; |
3014 | int i, max_idx, max_agg_idx; |
3015 | |
3016 | max_idx = bp->rx_nr_pages * RX_DESC_CNT; |
3017 | max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; |
3018 | if (!rxr->rx_tpa) |
3019 | goto skip_rx_tpa_free; |
3020 | |
3021 | for (i = 0; i < bp->max_tpa; i++) { |
3022 | struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; |
3023 | u8 *data = tpa_info->data; |
3024 | |
3025 | if (!data) |
3026 | continue; |
3027 | |
dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
3031 | |
3032 | tpa_info->data = NULL; |
3033 | |
skb_free_frag(data);
3035 | } |
3036 | |
3037 | skip_rx_tpa_free: |
3038 | if (!rxr->rx_buf_ring) |
3039 | goto skip_rx_buf_free; |
3040 | |
3041 | for (i = 0; i < max_idx; i++) { |
3042 | struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; |
3043 | dma_addr_t mapping = rx_buf->mapping; |
3044 | void *data = rx_buf->data; |
3045 | |
3046 | if (!data) |
3047 | continue; |
3048 | |
3049 | rx_buf->data = NULL; |
3050 | if (BNXT_RX_PAGE_MODE(bp)) { |
page_pool_recycle_direct(rxr->page_pool, data);
} else {
dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
skb_free_frag(data);
3057 | } |
3058 | } |
3059 | |
3060 | skip_rx_buf_free: |
3061 | if (!rxr->rx_agg_ring) |
3062 | goto skip_rx_agg_free; |
3063 | |
3064 | for (i = 0; i < max_agg_idx; i++) { |
3065 | struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; |
3066 | struct page *page = rx_agg_buf->page; |
3067 | |
3068 | if (!page) |
3069 | continue; |
3070 | |
3071 | rx_agg_buf->page = NULL; |
3072 | __clear_bit(i, rxr->rx_agg_bmap); |
3073 | |
page_pool_recycle_direct(rxr->page_pool, page);
3075 | } |
3076 | |
3077 | skip_rx_agg_free: |
3078 | map = rxr->rx_tpa_idx_map; |
3079 | if (map) |
3080 | memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); |
3081 | } |
3082 | |
3083 | static void bnxt_free_rx_skbs(struct bnxt *bp) |
3084 | { |
3085 | int i; |
3086 | |
3087 | if (!bp->rx_ring) |
3088 | return; |
3089 | |
3090 | for (i = 0; i < bp->rx_nr_rings; i++) |
bnxt_free_one_rx_ring_skbs(bp, i);
3092 | } |
3093 | |
3094 | static void bnxt_free_skbs(struct bnxt *bp) |
3095 | { |
3096 | bnxt_free_tx_skbs(bp); |
3097 | bnxt_free_rx_skbs(bp); |
3098 | } |
3099 | |
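/* Initialize context memory with the firmware-specified init value,
 * either as a full memset or at a fixed offset within each stride.
 */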
3100 | static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len) |
3101 | { |
3102 | u8 init_val = mem_init->init_val; |
3103 | u16 offset = mem_init->offset; |
3104 | u8 *p2 = p; |
3105 | int i; |
3106 | |
3107 | if (!init_val) |
3108 | return; |
3109 | if (offset == BNXT_MEM_INVALID_OFFSET) { |
3110 | memset(p, init_val, len); |
3111 | return; |
3112 | } |
3113 | for (i = 0; i < len; i += mem_init->size) |
3114 | *(p2 + i + offset) = init_val; |
3115 | } |
3116 | |
3117 | static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) |
3118 | { |
3119 | struct pci_dev *pdev = bp->pdev; |
3120 | int i; |
3121 | |
3122 | if (!rmem->pg_arr) |
3123 | goto skip_pages; |
3124 | |
3125 | for (i = 0; i < rmem->nr_pages; i++) { |
3126 | if (!rmem->pg_arr[i]) |
3127 | continue; |
3128 | |
dma_free_coherent(&pdev->dev, rmem->page_size,
rmem->pg_arr[i], rmem->dma_arr[i]);
3131 | |
3132 | rmem->pg_arr[i] = NULL; |
3133 | } |
3134 | skip_pages: |
3135 | if (rmem->pg_tbl) { |
3136 | size_t pg_tbl_size = rmem->nr_pages * 8; |
3137 | |
3138 | if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) |
3139 | pg_tbl_size = rmem->page_size; |
dma_free_coherent(&pdev->dev, pg_tbl_size,
rmem->pg_tbl, rmem->pg_tbl_map);
3142 | rmem->pg_tbl = NULL; |
3143 | } |
3144 | if (rmem->vmem_size && *rmem->vmem) { |
3145 | vfree(addr: *rmem->vmem); |
3146 | *rmem->vmem = NULL; |
3147 | } |
3148 | } |
3149 | |
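/* Allocate the DMA pages for a ring and, when the ring spans multiple
 * pages (or uses an indirect level), the page table that points to them.
 * Each 64-bit page table entry carries the page's DMA address plus PTE
 * flag bits (PTU_PTE_VALID, and NEXT_TO_LAST/LAST markers on rings).
 */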
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->mem_init)
			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
					  rmem->page_size);
		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_tpa_info(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		kfree(rxr->rx_tpa_idx_map);
		rxr->rx_tpa_idx_map = NULL;
		if (rxr->rx_tpa) {
			for (j = 0; j < bp->max_tpa; j++) {
				kfree(rxr->rx_tpa[j].agg_arr);
				rxr->rx_tpa[j].agg_arr = NULL;
			}
		}
		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;
	}
}

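/* Allocate per-ring TPA (hardware GRO/LRO aggregation) state.  On P5
 * chips the firmware-reported max_tpa_v2 sets the aggregation count,
 * and each TPA slot also needs an aggregation completion array plus a
 * per-ring agg_idx bitmap.
 */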
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
	int i, j;

	bp->max_tpa = MAX_TPA;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (!bp->max_tpa_v2)
			return 0;
		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct rx_agg_cmp *agg;

		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			return -ENOMEM;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;
		for (j = 0; j < bp->max_tpa; j++) {
			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
			if (!agg)
				return -ENOMEM;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	bnxt_free_tpa_info(bp);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		page_pool_destroy(rxr->page_pool);
		rxr->page_pool = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

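/* Create the page_pool backing this RX ring.  The pool is sized for the
 * aggregation ring and, in page mode (XDP), for the normal RX ring as
 * well, since both then allocate their buffers from it.
 */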
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = bp->rx_agg_ring_size;
	if (BNXT_RX_PAGE_MODE(bp))
		pp.pool_size += bp->rx_ring_size;
	pp.nid = dev_to_node(&bp->pdev->dev);
	pp.napi = &rxr->bnapi->napi;
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = bp->rx_dir;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
		int err = PTR_ERR(rxr->page_pool);

		rxr->page_pool = NULL;
		return err;
	}
	return 0;
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc = 0, agg_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_rx_page_pool(bp, rxr);
		if (rc)
			return rc;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
		if (rc < 0)
			return rc;

		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
						MEM_TYPE_PAGE_POOL,
						rxr->page_pool);
		if (rc) {
			xdp_rxq_info_unreg(&rxr->xdp_rxq);
			return rc;
		}

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = i;
		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;
		}
	}
	if (bp->flags & BNXT_FLAG_TPA)
		rc = bnxt_alloc_tpa_info(bp);
	return rc;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					   bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to back up the
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
							  bp->tx_push_size,
							  &txr->tx_push_mapping,
							  GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		spin_lock_init(&txr->xdp_tx_lock);
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

	kfree(cpr->cp_desc_ring);
	cpr->cp_desc_ring = NULL;
	ring->ring_mem.pg_arr = NULL;
	kfree(cpr->cp_desc_mapping);
	cpr->cp_desc_mapping = NULL;
	ring->ring_mem.dma_arr = NULL;
}

static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
{
	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
	if (!cpr->cp_desc_ring)
		return -ENOMEM;
	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
				       GFP_KERNEL);
	if (!cpr->cp_desc_mapping)
		return -ENOMEM;
	return 0;
}

static void bnxt_free_all_cp_arrays(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		if (!bnapi)
			continue;
		bnxt_free_cp_arrays(&bnapi->cp_ring);
	}
}

static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
{
	int i, n = bp->cp_nr_pages;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		int rc;

		if (!bnapi)
			continue;
		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
		if (rc)
			return rc;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				bnxt_free_ring(bp, &ring->ring_mem);
				bnxt_free_cp_arrays(cpr2);
				kfree(cpr2);
				cpr->cp_ring_arr[j] = NULL;
			}
		}
	}
}

static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
{
	struct bnxt_ring_mem_info *rmem;
	struct bnxt_ring_struct *ring;
	struct bnxt_cp_ring_info *cpr;
	int rc;

	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
	if (!cpr)
		return NULL;

	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
	if (rc) {
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		return NULL;
	}
	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
	rc = bnxt_alloc_ring(bp, rmem);
	if (rc) {
		bnxt_free_ring(bp, rmem);
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		cpr = NULL;
	}
	return cpr;
}

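/* Allocate the completion ring for each NAPI.  MSI-X vectors reserved
 * for the ULP (RDMA) driver start at ulp_base_vec, so rings at or above
 * that index get their MSI-X map_idx shifted up by ulp_msix.  On P5
 * chips, separate RX and TX completion sub-rings hang off each ring.
 */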
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int i, rc, ulp_base_vec, ulp_msix;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		if (ulp_msix && i >= ulp_base_vec)
			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (i < bp->rx_nr_rings) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
		if ((sh && i < bp->tx_nr_rings) ||
		    (!sh && i >= bp->rx_nr_rings)) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
	}
	return 0;
}

static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_ring_mem_info *rmem;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

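/* Fill one RX ring with freshly allocated buffers: data buffers for each
 * RX BD, pages for each aggregation BD, and one buffer per TPA slot.  A
 * partially filled ring is tolerated with a warning; only TPA buffer
 * allocation failures are fatal here.
 */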
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct net_device *dev = bp->dev;
	u32 prod;
	int i;

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
	}
	return 0;
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 type;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		bpf_prog_add(bp->xdp_prog, 1);
		rxr->xdp_prog = bp->xdp_prog;
	}
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnxt_init_rxbd_pages(ring, type);
	}

	return bnxt_alloc_one_rx_ring(bp, ring_nr);
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (!cpr2)
				continue;

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   BNXT_MIN_TX_DESC_CNT);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				get_random_bytes(vnic->rss_hash_key,
						 HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

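/* Return the number of ring pages needed to hold ring_size descriptors,
 * rounded up to a power of 2 (the bump-and-test loop stops as soon as
 * pages has a single bit set).
 */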
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}

void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	if (agg_factor) {
		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
				    bp->rx_ring_size, ring_size);
			bp->rx_ring_size = ring_size;
		}
		agg_ring_size = ring_size * agg_factor;

		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							      RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;

		if (BNXT_RX_PAGE_MODE(bp)) {
			rx_space = PAGE_SIZE;
			rx_size = PAGE_SIZE -
				ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		} else {
			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
			rx_space = rx_size + NET_SKB_PAD +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		}
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bp->rx_ring_size;
	/* MAX TPA needs to be added because TPA_START completions are
	 * immediately recycled, so the TPA completions are not bound by
	 * the RX ring size.
	 */
	if (bp->flags & BNXT_FLAG_TPA)
		max_rx_cmpl += bp->max_tpa;
	/* RX and TPA completions are 32-byte, all others are 16-byte */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}

/* Changing allocation mode of RX rings.
 * TODO: Update when extending xdp_rxq_info to support allocation modes.
 */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	struct net_device *dev = bp->dev;

	if (page_mode) {
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;

		if (bp->xdp_prog->aux->xdp_has_frags)
			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
		else
			dev->max_mtu =
				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
			bp->flags |= BNXT_FLAG_JUMBO;
			bp->rx_skb_func = bnxt_rx_multi_page_skb;
		} else {
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->rx_skb_func = bnxt_rx_page_skb;
		}
		bp->rx_dir = DMA_BIDIRECTIONAL;
		/* Disable LRO or GRO_HW */
		netdev_update_features(dev);
	} else {
		dev->max_mtu = bp->max_mtu;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}

static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			goto vnic_skip_grps;

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}
vnic_skip_grps:
		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}

static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_hwrm_wait_token *token;

	dma_pool_destroy(bp->hwrm_dma_pool);
	bp->hwrm_dma_pool = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
	rcu_read_unlock();
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
					    BNXT_HWRM_DMA_SIZE,
					    BNXT_HWRM_DMA_ALIGN, 0);
	if (!bp->hwrm_dma_pool)
		return -ENOMEM;

	INIT_HLIST_HEAD(&bp->hwrm_pending_list);

	return 0;
}

static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
	kfree(stats->hw_masks);
	stats->hw_masks = NULL;
	kfree(stats->sw_stats);
	stats->sw_stats = NULL;
	if (stats->hw_stats) {
		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
				  stats->hw_stats_map);
		stats->hw_stats = NULL;
	}
}

static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
				bool alloc_masks)
{
	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
					     &stats->hw_stats_map, GFP_KERNEL);
	if (!stats->hw_stats)
		return -ENOMEM;

	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
	if (!stats->sw_stats)
		goto stats_mem_err;

	if (alloc_masks) {
		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
		if (!stats->hw_masks)
			goto stats_mem_err;
	}
	return 0;

stats_mem_err:
	bnxt_free_stats_mem(bp, stats);
	return -ENOMEM;
}

static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
{
	int i;

	for (i = 0; i < count; i++)
		mask_arr[i] = mask;
}

static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
{
	int i;

	for (i = 0; i < count; i++)
		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
}

static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
				    struct bnxt_stats_mem *stats)
{
	struct hwrm_func_qstats_ext_output *resp;
	struct hwrm_func_qstats_ext_input *req;
	__le64 *hw_masks;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
	    !(bp->flags & BNXT_FLAG_CHIP_P5))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		hw_masks = &resp->rx_ucast_pkts;
		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);

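/* Learn the width of each hardware counter by querying the firmware
 * with the COUNTER_MASK flag; a successful query returns the roll-over
 * mask for every counter.  If the query is unsupported, fall back to
 * assumed widths: 48 bits on P5 chips, 64 bits otherwise, and 40 bits
 * for the port counters.
 */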
static void bnxt_init_stats(struct bnxt *bp)
{
	struct bnxt_napi *bnapi = bp->bnapi[0];
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_stats_mem *stats;
	__le64 *rx_stats, *tx_stats;
	int rc, rx_count, tx_count;
	u64 *rx_masks, *tx_masks;
	u64 mask;
	u8 flags;

	cpr = &bnapi->cp_ring;
	stats = &cpr->stats;
	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
	if (rc) {
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			mask = (1ULL << 48) - 1;
		else
			mask = -1ULL;
		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		stats = &bp->port_stats;
		rx_stats = stats->hw_stats;
		rx_masks = stats->hw_masks;
		rx_count = sizeof(struct rx_port_stats) / 8;
		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		tx_count = sizeof(struct tx_port_stats) / 8;

		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
		rc = bnxt_hwrm_port_qstats(bp, flags);
		if (rc) {
			mask = (1ULL << 40) - 1;

			bnxt_fill_masks(rx_masks, mask, rx_count);
			bnxt_fill_masks(tx_masks, mask, tx_count);
		} else {
			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
			bnxt_hwrm_port_qstats(bp, 0);
		}
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		stats = &bp->rx_port_stats_ext;
		rx_stats = stats->hw_stats;
		rx_masks = stats->hw_masks;
		rx_count = sizeof(struct rx_port_stats_ext) / 8;
		stats = &bp->tx_port_stats_ext;
		tx_stats = stats->hw_stats;
		tx_masks = stats->hw_masks;
		tx_count = sizeof(struct tx_port_stats_ext) / 8;

		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
		if (rc) {
			mask = (1ULL << 40) - 1;

			bnxt_fill_masks(rx_masks, mask, rx_count);
			if (tx_stats)
				bnxt_fill_masks(tx_masks, mask, tx_count);
		} else {
			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
			if (tx_stats)
				bnxt_copy_hw_masks(tx_masks, tx_stats,
						   tx_count);
			bnxt_hwrm_port_qstats_ext(bp, 0);
		}
	}
}

static void bnxt_free_port_stats(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_PORT_STATS;
	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;

	bnxt_free_stats_mem(bp, &bp->port_stats);
	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
}

static void bnxt_free_ring_stats(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_free_stats_mem(bp, &cpr->stats);
	}
}

static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	int rc;

	size = bp->hw_ring_stats_size;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->stats.len = size;
		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
		if (rc)
			return rc;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
		return 0;

	if (bp->port_stats.hw_stats)
		goto alloc_ext_stats;

	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
	if (rc)
		return rc;

	bp->flags |= BNXT_FLAG_PORT_STATS;

alloc_ext_stats:
	/* Display extended statistics only if FW supports it */
	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
			return 0;

	if (bp->rx_port_stats_ext.hw_stats)
		goto alloc_tx_ext_stats;

	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
	/* Extended stats are optional */
	if (rc)
		return 0;

alloc_tx_ext_stats:
	if (bp->tx_port_stats_ext.hw_stats)
		return 0;

	if (bp->hwrm_spec_code >= 0x10902 ||
	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
		/* Extended stats are optional */
		if (rc)
			return 0;
	}
	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* We are under rtnl_lock and all our NAPIs have been disabled,
	 * so it is safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		bitmap_free(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_all_cp_arrays(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_ring_stats(bp);
		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
			bnxt_free_port_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

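/* Allocate all driver memory for the current ring configuration.  With
 * irq_re_init, the bnapi pointer array and every bnxt_napi struct come
 * from a single kzalloc block: the pointer array sits at the front and
 * the cacheline-aligned structs follow it.
 */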
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				struct bnxt_cp_ring_info *cpr =
					&bp->bnapi[i]->cp_ring;

				cpr->cp_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			}
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				rxr->rx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
				rxr->rx_agg_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			}
			rxr->bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

			if (bp->flags & BNXT_FLAG_CHIP_P5)
				txr->tx_ring_struct.ring_mem.flags =
					BNXT_RMEM_RING_PTE_FLAG;
			txr->bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = txr;
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				txr->txq_index = i - bp->tx_nr_rings_xdp;
				bp->bnapi[j]->tx_int = bnxt_tx_int;
			} else {
				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
			}
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;
		bnxt_init_stats(bp);

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	rc = bnxt_alloc_all_cp_arrays(bp);
	if (rc)
		goto alloc_mem_err;

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}

static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
{
	struct bnxt_napi *bnapi = bp->bnapi[n];
	struct bnxt_cp_ring_info *cpr;

	cpr = &bnapi->cp_ring;
	return cpr->cp_ring_struct.map_idx;
}

static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	if (!bp->irq_tbl)
		return;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		int map_idx = bnxt_cp_num_to_irq_num(bp, i);

		synchronize_irq(bp->irq_tbl[map_idx].vector);
	}
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
	}
}

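/* Register the driver with the firmware.  The 256-bit async_events_bmap
 * selects which async event completions the firmware forwards to us; it
 * is copied into the request as eight 32-bit words.  The caller may pass
 * an extra bitmap to enable additional events.
 */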
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
			    bool async_only)
{
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	struct hwrm_func_drv_rgtr_output *resp;
	struct hwrm_func_drv_rgtr_input *req;
	u32 flags;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
	req->flags = cpu_to_le32(flags);
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req->vf_req_fwd[i] = cpu_to_le32(data[i]);

		req->enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
		req->flags |= cpu_to_le32(
			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
		u16 event_id = bnxt_async_events_arr[i];

		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
			continue;
		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
		    !bp->ptp_cfg)
			continue;
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
	}
	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}
	for (i = 0; i < 8; i++)
		req->async_event_fwd[i] |= cpu_to_le32(events[i]);

	if (async_only)
		req->enables =
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
		if (resp->flags &
		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
	if (rc)
		return rc;
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input *req;
	int rc;

	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;
	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
	if (rc)
		return rc;

	req->tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
		bp->vxlan_port = 0;
		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
		bp->nge_port = 0;
		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
		break;
	default:
		break;
	}

	rc = hwrm_req_send(bp, req);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

4941 | static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, |
4942 | u8 tunnel_type) |
4943 | { |
4944 | struct hwrm_tunnel_dst_port_alloc_output *resp; |
4945 | struct hwrm_tunnel_dst_port_alloc_input *req; |
4946 | int rc; |
4947 | |
4948 | rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); |
4949 | if (rc) |
4950 | return rc; |
4951 | |
4952 | req->tunnel_type = tunnel_type; |
4953 | req->tunnel_dst_port_val = port; |
4954 | |
4955 | resp = hwrm_req_hold(bp, req); |
4956 | rc = hwrm_req_send(bp, req); |
4957 | if (rc) { |
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
4960 | goto err_out; |
4961 | } |
4962 | |
4963 | switch (tunnel_type) { |
4964 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: |
4965 | bp->vxlan_port = port; |
4966 | bp->vxlan_fw_dst_port_id = |
4967 | le16_to_cpu(resp->tunnel_dst_port_id); |
4968 | break; |
4969 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: |
4970 | bp->nge_port = port; |
4971 | bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); |
4972 | break; |
4973 | default: |
4974 | break; |
4975 | } |
4976 | |
4977 | err_out: |
4978 | hwrm_req_drop(bp, req); |
4979 | return rc; |
4980 | } |
4981 | |
4982 | static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) |
4983 | { |
4984 | struct hwrm_cfa_l2_set_rx_mask_input *req; |
4985 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
4986 | int rc; |
4987 | |
4988 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); |
4989 | if (rc) |
4990 | return rc; |
4991 | |
4992 | req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); |
4993 | if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { |
4994 | req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); |
4995 | req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); |
4996 | } |
4997 | req->mask = cpu_to_le32(vnic->rx_mask); |
4998 | return hwrm_req_send_silent(bp, req); |
4999 | } |
5000 | |
5001 | #ifdef CONFIG_RFS_ACCEL |
5002 | static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, |
5003 | struct bnxt_ntuple_filter *fltr) |
5004 | { |
5005 | struct hwrm_cfa_ntuple_filter_free_input *req; |
5006 | int rc; |
5007 | |
5008 | rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); |
5009 | if (rc) |
5010 | return rc; |
5011 | |
5012 | req->ntuple_filter_id = fltr->filter_id; |
5013 | return hwrm_req_send(bp, req); |
5014 | } |
5015 | |
5016 | #define BNXT_NTP_FLTR_FLAGS \ |
5017 | (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ |
5018 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ |
5019 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ |
5020 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ |
5021 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ |
5022 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ |
5023 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ |
5024 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ |
5025 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ |
5026 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ |
5027 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ |
5028 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ |
5029 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ |
5030 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) |
5031 | |
5032 | #define BNXT_NTP_TUNNEL_FLTR_FLAG \ |
5033 | CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
5034 | |
5035 | static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, |
5036 | struct bnxt_ntuple_filter *fltr) |
5037 | { |
5038 | struct hwrm_cfa_ntuple_filter_alloc_output *resp; |
5039 | struct hwrm_cfa_ntuple_filter_alloc_input *req; |
5040 | struct flow_keys *keys = &fltr->fkeys; |
5041 | struct bnxt_vnic_info *vnic; |
5042 | u32 flags = 0; |
5043 | int rc; |
5044 | |
5045 | rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); |
5046 | if (rc) |
5047 | return rc; |
5048 | |
5049 | req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; |
5050 | |
5051 | if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { |
5052 | flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; |
5053 | req->dst_id = cpu_to_le16(fltr->rxq); |
5054 | } else { |
5055 | vnic = &bp->vnic_info[fltr->rxq + 1]; |
5056 | req->dst_id = cpu_to_le16(vnic->fw_vnic_id); |
5057 | } |
5058 | req->flags = cpu_to_le32(flags); |
5059 | req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); |
5060 | |
5061 | req->ethertype = htons(ETH_P_IP); |
5062 | memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); |
5063 | req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; |
5064 | req->ip_protocol = keys->basic.ip_proto; |
5065 | |
5066 | if (keys->basic.n_proto == htons(ETH_P_IPV6)) { |
5067 | int i; |
5068 | |
5069 | req->ethertype = htons(ETH_P_IPV6); |
5070 | req->ip_addr_type = |
5071 | CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; |
5072 | *(struct in6_addr *)&req->src_ipaddr[0] = |
5073 | keys->addrs.v6addrs.src; |
5074 | *(struct in6_addr *)&req->dst_ipaddr[0] = |
5075 | keys->addrs.v6addrs.dst; |
5076 | for (i = 0; i < 4; i++) { |
5077 | req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); |
5078 | req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); |
5079 | } |
5080 | } else { |
5081 | req->src_ipaddr[0] = keys->addrs.v4addrs.src; |
5082 | req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); |
5083 | req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; |
5084 | req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); |
5085 | } |
5086 | if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { |
5087 | req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); |
5088 | req->tunnel_type = |
5089 | CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; |
5090 | } |
5091 | |
5092 | req->src_port = keys->ports.src; |
5093 | req->src_port_mask = cpu_to_be16(0xffff); |
5094 | req->dst_port = keys->ports.dst; |
5095 | req->dst_port_mask = cpu_to_be16(0xffff); |
5096 | |
5097 | resp = hwrm_req_hold(bp, req); |
5098 | rc = hwrm_req_send(bp, req); |
5099 | if (!rc) |
5100 | fltr->filter_id = resp->ntuple_filter_id; |
5101 | hwrm_req_drop(bp, req); |
5102 | return rc; |
5103 | } |
5104 | #endif |
5105 | |
5106 | static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, |
5107 | const u8 *mac_addr) |
5108 | { |
5109 | struct hwrm_cfa_l2_filter_alloc_output *resp; |
5110 | struct hwrm_cfa_l2_filter_alloc_input *req; |
5111 | int rc; |
5112 | |
5113 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); |
5114 | if (rc) |
5115 | return rc; |
5116 | |
5117 | req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); |
5118 | if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) |
5119 | req->flags |= |
5120 | cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); |
5121 | req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); |
5122 | req->enables = |
5123 | cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | |
5124 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | |
5125 | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); |
5126 | memcpy(req->l2_addr, mac_addr, ETH_ALEN); |
5127 | req->l2_addr_mask[0] = 0xff; |
5128 | req->l2_addr_mask[1] = 0xff; |
5129 | req->l2_addr_mask[2] = 0xff; |
5130 | req->l2_addr_mask[3] = 0xff; |
5131 | req->l2_addr_mask[4] = 0xff; |
5132 | req->l2_addr_mask[5] = 0xff; |
5133 | |
5134 | resp = hwrm_req_hold(bp, req); |
5135 | rc = hwrm_req_send(bp, req); |
5136 | if (!rc) |
5137 | bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = |
5138 | resp->l2_filter_id; |
5139 | hwrm_req_drop(bp, req); |
5140 | return rc; |
5141 | } |
5142 | |
5143 | static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) |
5144 | { |
5145 | struct hwrm_cfa_l2_filter_free_input *req; |
5146 | u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ |
5147 | int rc; |
5148 | |
5149 | /* Any associated ntuple filters will also be cleared by firmware. */ |
5150 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); |
5151 | if (rc) |
5152 | return rc; |
5153 | hwrm_req_hold(bp, req); |
5154 | for (i = 0; i < num_of_vnics; i++) { |
5155 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
5156 | |
5157 | for (j = 0; j < vnic->uc_filter_count; j++) { |
5158 | req->l2_filter_id = vnic->fw_l2_filter_id[j]; |
5159 | |
5160 | rc = hwrm_req_send(bp, req); |
5161 | } |
5162 | vnic->uc_filter_count = 0; |
5163 | } |
5164 | hwrm_req_drop(bp, req); |
5165 | return rc; |
5166 | } |
5167 | |
5168 | static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) |
5169 | { |
5170 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5171 | u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; |
5172 | struct hwrm_vnic_tpa_cfg_input *req; |
5173 | int rc; |
5174 | |
5175 | if (vnic->fw_vnic_id == INVALID_HW_RING_ID) |
5176 | return 0; |
5177 | |
5178 | rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); |
5179 | if (rc) |
5180 | return rc; |
5181 | |
5182 | if (tpa_flags) { |
5183 | u16 mss = bp->dev->mtu - 40; |
5184 | u32 nsegs, n, segs = 0, flags; |
5185 | |
5186 | flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | |
5187 | VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | |
5188 | VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | |
5189 | VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | |
5190 | VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; |
5191 | if (tpa_flags & BNXT_FLAG_GRO) |
5192 | flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; |
5193 | |
5194 | req->flags = cpu_to_le32(flags); |
5195 | |
5196 | req->enables = |
5197 | cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | |
5198 | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | |
5199 | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); |
5200 | |
		/* The number of segs is in log2 units, and the first packet
		 * is not included in these units.
		 */
5204 | if (mss <= BNXT_RX_PAGE_SIZE) { |
5205 | n = BNXT_RX_PAGE_SIZE / mss; |
5206 | nsegs = (MAX_SKB_FRAGS - 1) * n; |
5207 | } else { |
5208 | n = mss / BNXT_RX_PAGE_SIZE; |
5209 | if (mss & (BNXT_RX_PAGE_SIZE - 1)) |
5210 | n++; |
5211 | nsegs = (MAX_SKB_FRAGS - n) / n; |
5212 | } |
5213 | |
5214 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5215 | segs = MAX_TPA_SEGS_P5; |
5216 | max_aggs = bp->max_tpa; |
5217 | } else { |
5218 | segs = ilog2(nsegs); |
5219 | } |
5220 | req->max_agg_segs = cpu_to_le16(segs); |
5221 | req->max_aggs = cpu_to_le16(max_aggs); |
5222 | |
5223 | req->min_agg_len = cpu_to_le32(512); |
5224 | } |
5225 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
5226 | |
5227 | return hwrm_req_send(bp, req); |
5228 | } |
5229 | |
5230 | static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) |
5231 | { |
5232 | struct bnxt_ring_grp_info *grp_info; |
5233 | |
5234 | grp_info = &bp->grp_info[ring->grp_idx]; |
5235 | return grp_info->cp_fw_ring_id; |
5236 | } |
5237 | |
5238 | static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
5239 | { |
5240 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5241 | struct bnxt_napi *bnapi = rxr->bnapi; |
5242 | struct bnxt_cp_ring_info *cpr; |
5243 | |
5244 | cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; |
5245 | return cpr->cp_ring_struct.fw_ring_id; |
5246 | } else { |
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5248 | } |
5249 | } |
5250 | |
5251 | static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) |
5252 | { |
5253 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5254 | struct bnxt_napi *bnapi = txr->bnapi; |
5255 | struct bnxt_cp_ring_info *cpr; |
5256 | |
5257 | cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; |
5258 | return cpr->cp_ring_struct.fw_ring_id; |
5259 | } else { |
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5261 | } |
5262 | } |
5263 | |
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5265 | { |
5266 | int entries; |
5267 | |
5268 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5269 | entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; |
5270 | else |
5271 | entries = HW_HASH_INDEX_SIZE; |
5272 | |
5273 | bp->rss_indir_tbl_entries = entries; |
	bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
					  GFP_KERNEL);
5276 | if (!bp->rss_indir_tbl) |
5277 | return -ENOMEM; |
5278 | return 0; |
5279 | } |
5280 | |
static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5282 | { |
5283 | u16 max_rings, max_entries, pad, i; |
5284 | |
5285 | if (!bp->rx_nr_rings) |
5286 | return; |
5287 | |
5288 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
5289 | max_rings = bp->rx_nr_rings - 1; |
5290 | else |
5291 | max_rings = bp->rx_nr_rings; |
5292 | |
	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5294 | |
5295 | for (i = 0; i < max_entries; i++) |
		bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5297 | |
5298 | pad = bp->rss_indir_tbl_entries - max_entries; |
5299 | if (pad) |
5300 | memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); |
5301 | } |
5302 | |
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5304 | { |
5305 | u16 i, tbl_size, max_ring = 0; |
5306 | |
5307 | if (!bp->rss_indir_tbl) |
5308 | return 0; |
5309 | |
	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5311 | for (i = 0; i < tbl_size; i++) |
5312 | max_ring = max(max_ring, bp->rss_indir_tbl[i]); |
5313 | return max_ring; |
5314 | } |
5315 | |
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5317 | { |
5318 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5319 | return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); |
5320 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
5321 | return 2; |
5322 | return 1; |
5323 | } |
5324 | |
static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5328 | u16 i, j; |
5329 | |
5330 | /* Fill the RSS indirection table with ring group ids */ |
5331 | for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { |
5332 | if (!no_rss) |
5333 | j = bp->rss_indir_tbl[i]; |
5334 | vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); |
5335 | } |
5336 | } |
5337 | |
static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
				    struct bnxt_vnic_info *vnic)
5340 | { |
5341 | __le16 *ring_tbl = vnic->rss_table; |
5342 | struct bnxt_rx_ring_info *rxr; |
5343 | u16 tbl_size, i; |
5344 | |
	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5346 | |
5347 | for (i = 0; i < tbl_size; i++) { |
5348 | u16 ring_id, j; |
5349 | |
5350 | j = bp->rss_indir_tbl[i]; |
5351 | rxr = &bp->rx_ring[j]; |
5352 | |
5353 | ring_id = rxr->rx_ring_struct.fw_ring_id; |
5354 | *ring_tbl++ = cpu_to_le16(ring_id); |
5355 | ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
5356 | *ring_tbl++ = cpu_to_le16(ring_id); |
5357 | } |
5358 | } |
5359 | |
static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
			 struct bnxt_vnic_info *vnic)
5363 | { |
5364 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5365 | bnxt_fill_hw_rss_tbl_p5(bp, vnic); |
5366 | else |
5367 | bnxt_fill_hw_rss_tbl(bp, vnic); |
5368 | |
5369 | if (bp->rss_hash_delta) { |
5370 | req->hash_type = cpu_to_le32(bp->rss_hash_delta); |
5371 | if (bp->rss_hash_cfg & bp->rss_hash_delta) |
5372 | req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; |
5373 | else |
5374 | req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; |
5375 | } else { |
5376 | req->hash_type = cpu_to_le32(bp->rss_hash_cfg); |
5377 | } |
5378 | req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; |
5379 | req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); |
5380 | req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); |
5381 | } |
5382 | |
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5384 | { |
5385 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5386 | struct hwrm_vnic_rss_cfg_input *req; |
5387 | int rc; |
5388 | |
5389 | if ((bp->flags & BNXT_FLAG_CHIP_P5) || |
5390 | vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) |
5391 | return 0; |
5392 | |
5393 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); |
5394 | if (rc) |
5395 | return rc; |
5396 | |
5397 | if (set_rss) |
5398 | __bnxt_hwrm_vnic_set_rss(bp, req, vnic); |
5399 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
5400 | return hwrm_req_send(bp, req); |
5401 | } |
5402 | |
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5404 | { |
5405 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5406 | struct hwrm_vnic_rss_cfg_input *req; |
5407 | dma_addr_t ring_tbl_map; |
5408 | u32 i, nr_ctxs; |
5409 | int rc; |
5410 | |
5411 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); |
5412 | if (rc) |
5413 | return rc; |
5414 | |
5415 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
5416 | if (!set_rss) |
5417 | return hwrm_req_send(bp, req); |
5418 | |
5419 | __bnxt_hwrm_vnic_set_rss(bp, req, vnic); |
5420 | ring_tbl_map = vnic->rss_table_dma_addr; |
	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5422 | |
5423 | hwrm_req_hold(bp, req); |
5424 | for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { |
5425 | req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); |
5426 | req->ring_table_pair_index = i; |
5427 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); |
5428 | rc = hwrm_req_send(bp, req); |
5429 | if (rc) |
5430 | goto exit; |
5431 | } |
5432 | |
5433 | exit: |
5434 | hwrm_req_drop(bp, req); |
5435 | return rc; |
5436 | } |
5437 | |
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5439 | { |
5440 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
5441 | struct hwrm_vnic_rss_qcfg_output *resp; |
5442 | struct hwrm_vnic_rss_qcfg_input *req; |
5443 | |
5444 | if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) |
5445 | return; |
5446 | |
5447 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
	/* all contexts use the same hash_type; context zero always exists */
5449 | req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
5450 | resp = hwrm_req_hold(bp, req); |
5451 | if (!hwrm_req_send(bp, req)) { |
5452 | bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; |
5453 | bp->rss_hash_delta = 0; |
5454 | } |
5455 | hwrm_req_drop(bp, req); |
5456 | } |
5457 | |
5458 | static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) |
5459 | { |
5460 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5461 | struct hwrm_vnic_plcmodes_cfg_input *req; |
5462 | int rc; |
5463 | |
5464 | rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); |
5465 | if (rc) |
5466 | return rc; |
5467 | |
5468 | req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); |
5469 | req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); |
5470 | |
5471 | if (BNXT_RX_PAGE_MODE(bp)) { |
5472 | req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); |
5473 | } else { |
5474 | req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | |
5475 | VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); |
5476 | req->enables |= |
5477 | cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); |
5478 | req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); |
5479 | req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); |
5480 | } |
5481 | req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); |
5482 | return hwrm_req_send(bp, req); |
5483 | } |
5484 | |
5485 | static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, |
5486 | u16 ctx_idx) |
5487 | { |
5488 | struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; |
5489 | |
5490 | if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) |
5491 | return; |
5492 | |
5493 | req->rss_cos_lb_ctx_id = |
5494 | cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); |
5495 | |
5496 | hwrm_req_send(bp, req); |
5497 | bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; |
5498 | } |
5499 | |
5500 | static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) |
5501 | { |
5502 | int i, j; |
5503 | |
5504 | for (i = 0; i < bp->nr_vnics; i++) { |
5505 | struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; |
5506 | |
5507 | for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { |
5508 | if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) |
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5510 | } |
5511 | } |
5512 | bp->rsscos_nr_ctxs = 0; |
5513 | } |
5514 | |
5515 | static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) |
5516 | { |
5517 | struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; |
5518 | struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; |
5519 | int rc; |
5520 | |
5521 | rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); |
5522 | if (rc) |
5523 | return rc; |
5524 | |
5525 | resp = hwrm_req_hold(bp, req); |
5526 | rc = hwrm_req_send(bp, req); |
5527 | if (!rc) |
5528 | bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = |
5529 | le16_to_cpu(resp->rss_cos_lb_ctx_id); |
5530 | hwrm_req_drop(bp, req); |
5531 | |
5532 | return rc; |
5533 | } |
5534 | |
5535 | static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) |
5536 | { |
5537 | if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) |
5538 | return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; |
5539 | return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; |
5540 | } |
5541 | |
5542 | int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) |
5543 | { |
5544 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5545 | struct hwrm_vnic_cfg_input *req; |
5546 | unsigned int ring = 0, grp_idx; |
5547 | u16 def_vlan = 0; |
5548 | int rc; |
5549 | |
5550 | rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); |
5551 | if (rc) |
5552 | return rc; |
5553 | |
5554 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5555 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; |
5556 | |
5557 | req->default_rx_ring_id = |
5558 | cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); |
5559 | req->default_cmpl_ring_id = |
5560 | cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); |
5561 | req->enables = |
5562 | cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | |
5563 | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); |
5564 | goto vnic_mru; |
5565 | } |
5566 | req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); |
5567 | /* Only RSS support for now TBD: COS & LB */ |
5568 | if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { |
5569 | req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); |
5570 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | |
5571 | VNIC_CFG_REQ_ENABLES_MRU); |
5572 | } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { |
5573 | req->rss_rule = |
5574 | cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); |
5575 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | |
5576 | VNIC_CFG_REQ_ENABLES_MRU); |
5577 | req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); |
5578 | } else { |
5579 | req->rss_rule = cpu_to_le16(0xffff); |
5580 | } |
5581 | |
5582 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && |
5583 | (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { |
5584 | req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); |
5585 | req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); |
5586 | } else { |
5587 | req->cos_rule = cpu_to_le16(0xffff); |
5588 | } |
5589 | |
5590 | if (vnic->flags & BNXT_VNIC_RSS_FLAG) |
5591 | ring = 0; |
5592 | else if (vnic->flags & BNXT_VNIC_RFS_FLAG) |
5593 | ring = vnic_id - 1; |
5594 | else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) |
5595 | ring = bp->rx_nr_rings - 1; |
5596 | |
5597 | grp_idx = bp->rx_ring[ring].bnapi->index; |
5598 | req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); |
5599 | req->lb_rule = cpu_to_le16(0xffff); |
5600 | vnic_mru: |
5601 | req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); |
5602 | |
5603 | req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); |
5604 | #ifdef CONFIG_BNXT_SRIOV |
5605 | if (BNXT_VF(bp)) |
5606 | def_vlan = bp->vf.vlan; |
5607 | #endif |
5608 | if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) |
5609 | req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); |
	if (!vnic_id && bnxt_ulp_registered(bp->edev))
5611 | req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); |
5612 | |
5613 | return hwrm_req_send(bp, req); |
5614 | } |
5615 | |
5616 | static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) |
5617 | { |
5618 | if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { |
5619 | struct hwrm_vnic_free_input *req; |
5620 | |
5621 | if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) |
5622 | return; |
5623 | |
5624 | req->vnic_id = |
5625 | cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); |
5626 | |
5627 | hwrm_req_send(bp, req); |
5628 | bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; |
5629 | } |
5630 | } |
5631 | |
5632 | static void bnxt_hwrm_vnic_free(struct bnxt *bp) |
5633 | { |
5634 | u16 i; |
5635 | |
5636 | for (i = 0; i < bp->nr_vnics; i++) |
		bnxt_hwrm_vnic_free_one(bp, i);
5638 | } |
5639 | |
5640 | static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, |
5641 | unsigned int start_rx_ring_idx, |
5642 | unsigned int nr_rings) |
5643 | { |
5644 | unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; |
5645 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
5646 | struct hwrm_vnic_alloc_output *resp; |
5647 | struct hwrm_vnic_alloc_input *req; |
5648 | int rc; |
5649 | |
5650 | rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); |
5651 | if (rc) |
5652 | return rc; |
5653 | |
5654 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5655 | goto vnic_no_ring_grps; |
5656 | |
5657 | /* map ring groups to this vnic */ |
5658 | for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { |
5659 | grp_idx = bp->rx_ring[i].bnapi->index; |
5660 | if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { |
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
5663 | break; |
5664 | } |
5665 | vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; |
5666 | } |
5667 | |
5668 | vnic_no_ring_grps: |
5669 | for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) |
5670 | vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; |
5671 | if (vnic_id == 0) |
5672 | req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); |
5673 | |
5674 | resp = hwrm_req_hold(bp, req); |
5675 | rc = hwrm_req_send(bp, req); |
5676 | if (!rc) |
5677 | vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); |
5678 | hwrm_req_drop(bp, req); |
5679 | return rc; |
5680 | } |
5681 | |
5682 | static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) |
5683 | { |
5684 | struct hwrm_vnic_qcaps_output *resp; |
5685 | struct hwrm_vnic_qcaps_input *req; |
5686 | int rc; |
5687 | |
5688 | bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); |
5689 | bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); |
5690 | if (bp->hwrm_spec_code < 0x10600) |
5691 | return 0; |
5692 | |
5693 | rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); |
5694 | if (rc) |
5695 | return rc; |
5696 | |
5697 | resp = hwrm_req_hold(bp, req); |
5698 | rc = hwrm_req_send(bp, req); |
5699 | if (!rc) { |
5700 | u32 flags = le32_to_cpu(resp->flags); |
5701 | |
5702 | if (!(bp->flags & BNXT_FLAG_CHIP_P5) && |
5703 | (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) |
5704 | bp->flags |= BNXT_FLAG_NEW_RSS_CAP; |
5705 | if (flags & |
5706 | VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) |
5707 | bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; |
5708 | |
5709 | /* Older P5 fw before EXT_HW_STATS support did not set |
5710 | * VLAN_STRIP_CAP properly. |
5711 | */ |
5712 | if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || |
5713 | (BNXT_CHIP_P5_THOR(bp) && |
5714 | !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) |
5715 | bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; |
5716 | if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) |
5717 | bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA; |
5718 | bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); |
5719 | if (bp->max_tpa_v2) { |
5720 | if (BNXT_CHIP_P5_THOR(bp)) |
5721 | bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; |
5722 | else |
5723 | bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; |
5724 | } |
5725 | } |
5726 | hwrm_req_drop(bp, req); |
5727 | return rc; |
5728 | } |
5729 | |
5730 | static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) |
5731 | { |
5732 | struct hwrm_ring_grp_alloc_output *resp; |
5733 | struct hwrm_ring_grp_alloc_input *req; |
5734 | int rc; |
5735 | u16 i; |
5736 | |
5737 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5738 | return 0; |
5739 | |
5740 | rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); |
5741 | if (rc) |
5742 | return rc; |
5743 | |
5744 | resp = hwrm_req_hold(bp, req); |
5745 | for (i = 0; i < bp->rx_nr_rings; i++) { |
5746 | unsigned int grp_idx = bp->rx_ring[i].bnapi->index; |
5747 | |
5748 | req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); |
5749 | req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); |
5750 | req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); |
5751 | req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); |
5752 | |
5753 | rc = hwrm_req_send(bp, req); |
5754 | |
5755 | if (rc) |
5756 | break; |
5757 | |
5758 | bp->grp_info[grp_idx].fw_grp_id = |
5759 | le32_to_cpu(resp->ring_group_id); |
5760 | } |
5761 | hwrm_req_drop(bp, req); |
5762 | return rc; |
5763 | } |
5764 | |
5765 | static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) |
5766 | { |
5767 | struct hwrm_ring_grp_free_input *req; |
5768 | u16 i; |
5769 | |
5770 | if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) |
5771 | return; |
5772 | |
5773 | if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) |
5774 | return; |
5775 | |
5776 | hwrm_req_hold(bp, req); |
5777 | for (i = 0; i < bp->cp_nr_rings; i++) { |
5778 | if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) |
5779 | continue; |
5780 | req->ring_group_id = |
5781 | cpu_to_le32(bp->grp_info[i].fw_grp_id); |
5782 | |
5783 | hwrm_req_send(bp, req); |
5784 | bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; |
5785 | } |
5786 | hwrm_req_drop(bp, req); |
5787 | } |
5788 | |
5789 | static int hwrm_ring_alloc_send_msg(struct bnxt *bp, |
5790 | struct bnxt_ring_struct *ring, |
5791 | u32 ring_type, u32 map_index) |
5792 | { |
5793 | struct hwrm_ring_alloc_output *resp; |
5794 | struct hwrm_ring_alloc_input *req; |
5795 | struct bnxt_ring_mem_info *rmem = &ring->ring_mem; |
5796 | struct bnxt_ring_grp_info *grp_info; |
5797 | int rc, err = 0; |
5798 | u16 ring_id; |
5799 | |
5800 | rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); |
5801 | if (rc) |
5802 | goto exit; |
5803 | |
5804 | req->enables = 0; |
5805 | if (rmem->nr_pages > 1) { |
5806 | req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); |
5807 | /* Page size is in log2 units */ |
5808 | req->page_size = BNXT_PAGE_SHIFT; |
5809 | req->page_tbl_depth = 1; |
5810 | } else { |
5811 | req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); |
5812 | } |
5813 | req->fbo = 0; |
5814 | /* Association of ring index with doorbell index and MSIX number */ |
5815 | req->logical_id = cpu_to_le16(map_index); |
5816 | |
5817 | switch (ring_type) { |
5818 | case HWRM_RING_ALLOC_TX: { |
5819 | struct bnxt_tx_ring_info *txr; |
5820 | |
5821 | txr = container_of(ring, struct bnxt_tx_ring_info, |
5822 | tx_ring_struct); |
5823 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; |
5824 | /* Association of transmit ring with completion ring */ |
5825 | grp_info = &bp->grp_info[ring->grp_idx]; |
5826 | req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); |
5827 | req->length = cpu_to_le32(bp->tx_ring_mask + 1); |
5828 | req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
5829 | req->queue_id = cpu_to_le16(ring->queue_id); |
5830 | break; |
5831 | } |
5832 | case HWRM_RING_ALLOC_RX: |
5833 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; |
5834 | req->length = cpu_to_le32(bp->rx_ring_mask + 1); |
5835 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5836 | u16 flags = 0; |
5837 | |
5838 | /* Association of rx ring with stats context */ |
5839 | grp_info = &bp->grp_info[ring->grp_idx]; |
5840 | req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); |
5841 | req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
5842 | req->enables |= cpu_to_le32( |
5843 | RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); |
5844 | if (NET_IP_ALIGN == 2) |
5845 | flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; |
5846 | req->flags = cpu_to_le16(flags); |
5847 | } |
5848 | break; |
5849 | case HWRM_RING_ALLOC_AGG: |
5850 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5851 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; |
5852 | /* Association of agg ring with rx ring */ |
5853 | grp_info = &bp->grp_info[ring->grp_idx]; |
5854 | req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); |
5855 | req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); |
5856 | req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); |
5857 | req->enables |= cpu_to_le32( |
5858 | RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | |
5859 | RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); |
5860 | } else { |
5861 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; |
5862 | } |
5863 | req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); |
5864 | break; |
5865 | case HWRM_RING_ALLOC_CMPL: |
5866 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; |
5867 | req->length = cpu_to_le32(bp->cp_ring_mask + 1); |
5868 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5869 | /* Association of cp ring with nq */ |
5870 | grp_info = &bp->grp_info[map_index]; |
5871 | req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); |
5872 | req->cq_handle = cpu_to_le64(ring->handle); |
5873 | req->enables |= cpu_to_le32( |
5874 | RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); |
5875 | } else if (bp->flags & BNXT_FLAG_USING_MSIX) { |
5876 | req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; |
5877 | } |
5878 | break; |
5879 | case HWRM_RING_ALLOC_NQ: |
5880 | req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; |
5881 | req->length = cpu_to_le32(bp->cp_ring_mask + 1); |
5882 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
5883 | req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; |
5884 | break; |
5885 | default: |
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
5888 | return -1; |
5889 | } |
5890 | |
5891 | resp = hwrm_req_hold(bp, req); |
5892 | rc = hwrm_req_send(bp, req); |
5893 | err = le16_to_cpu(resp->error_code); |
5894 | ring_id = le16_to_cpu(resp->ring_id); |
5895 | hwrm_req_drop(bp, req); |
5896 | |
5897 | exit: |
5898 | if (rc || err) { |
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
5901 | return -EIO; |
5902 | } |
5903 | ring->fw_ring_id = ring_id; |
5904 | return rc; |
5905 | } |
5906 | |
5907 | static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) |
5908 | { |
5909 | int rc; |
5910 | |
5911 | if (BNXT_PF(bp)) { |
5912 | struct hwrm_func_cfg_input *req; |
5913 | |
		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
5915 | if (rc) |
5916 | return rc; |
5917 | |
5918 | req->fid = cpu_to_le16(0xffff); |
5919 | req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); |
5920 | req->async_event_cr = cpu_to_le16(idx); |
5921 | return hwrm_req_send(bp, req); |
5922 | } else { |
5923 | struct hwrm_func_vf_cfg_input *req; |
5924 | |
5925 | rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); |
5926 | if (rc) |
5927 | return rc; |
5928 | |
5929 | req->enables = |
5930 | cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); |
5931 | req->async_event_cr = cpu_to_le16(idx); |
5932 | return hwrm_req_send(bp, req); |
5933 | } |
5934 | } |
5935 | |
5936 | static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, |
5937 | u32 map_idx, u32 xid) |
5938 | { |
5939 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
5940 | if (BNXT_PF(bp)) |
5941 | db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; |
5942 | else |
5943 | db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; |
5944 | switch (ring_type) { |
5945 | case HWRM_RING_ALLOC_TX: |
5946 | db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; |
5947 | break; |
5948 | case HWRM_RING_ALLOC_RX: |
5949 | case HWRM_RING_ALLOC_AGG: |
5950 | db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; |
5951 | break; |
5952 | case HWRM_RING_ALLOC_CMPL: |
5953 | db->db_key64 = DBR_PATH_L2; |
5954 | break; |
5955 | case HWRM_RING_ALLOC_NQ: |
5956 | db->db_key64 = DBR_PATH_L2; |
5957 | break; |
5958 | } |
5959 | db->db_key64 |= (u64)xid << DBR_XID_SFT; |
5960 | } else { |
5961 | db->doorbell = bp->bar1 + map_idx * 0x80; |
5962 | switch (ring_type) { |
5963 | case HWRM_RING_ALLOC_TX: |
5964 | db->db_key32 = DB_KEY_TX; |
5965 | break; |
5966 | case HWRM_RING_ALLOC_RX: |
5967 | case HWRM_RING_ALLOC_AGG: |
5968 | db->db_key32 = DB_KEY_RX; |
5969 | break; |
5970 | case HWRM_RING_ALLOC_CMPL: |
5971 | db->db_key32 = DB_KEY_CP; |
5972 | break; |
5973 | } |
5974 | } |
5975 | } |
5976 | |
5977 | static int bnxt_hwrm_ring_alloc(struct bnxt *bp) |
5978 | { |
5979 | bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); |
5980 | int i, rc = 0; |
5981 | u32 type; |
5982 | |
5983 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5984 | type = HWRM_RING_ALLOC_NQ; |
5985 | else |
5986 | type = HWRM_RING_ALLOC_CMPL; |
5987 | for (i = 0; i < bp->cp_nr_rings; i++) { |
5988 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
5989 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
5990 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
5991 | u32 map_idx = ring->map_idx; |
5992 | unsigned int vector; |
5993 | |
5994 | vector = bp->irq_tbl[map_idx].vector; |
		disable_irq_nosync(vector);
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5997 | if (rc) { |
			enable_irq(vector);
5999 | goto err_out; |
6000 | } |
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		enable_irq(vector);
6004 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
6005 | |
6006 | if (!i) { |
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
6008 | if (rc) |
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
6010 | } |
6011 | } |
6012 | |
6013 | type = HWRM_RING_ALLOC_TX; |
6014 | for (i = 0; i < bp->tx_nr_rings; i++) { |
6015 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
6016 | struct bnxt_ring_struct *ring; |
6017 | u32 map_idx; |
6018 | |
6019 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6020 | struct bnxt_napi *bnapi = txr->bnapi; |
6021 | struct bnxt_cp_ring_info *cpr, *cpr2; |
6022 | u32 type2 = HWRM_RING_ALLOC_CMPL; |
6023 | |
6024 | cpr = &bnapi->cp_ring; |
6025 | cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; |
6026 | ring = &cpr2->cp_ring_struct; |
6027 | ring->handle = BNXT_TX_HDL; |
6028 | map_idx = bnapi->index; |
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6030 | if (rc) |
6031 | goto err_out; |
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6035 | } |
6036 | ring = &txr->tx_ring_struct; |
6037 | map_idx = i; |
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6039 | if (rc) |
6040 | goto err_out; |
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
6042 | } |
6043 | |
6044 | type = HWRM_RING_ALLOC_RX; |
6045 | for (i = 0; i < bp->rx_nr_rings; i++) { |
6046 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
6047 | struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; |
6048 | struct bnxt_napi *bnapi = rxr->bnapi; |
6049 | u32 map_idx = bnapi->index; |
6050 | |
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6052 | if (rc) |
6053 | goto err_out; |
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
6055 | /* If we have agg rings, post agg buffers first. */ |
6056 | if (!agg_rings) |
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6058 | bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; |
6059 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6060 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
6061 | u32 type2 = HWRM_RING_ALLOC_CMPL; |
6062 | struct bnxt_cp_ring_info *cpr2; |
6063 | |
6064 | cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; |
6065 | ring = &cpr2->cp_ring_struct; |
6066 | ring->handle = BNXT_RX_HDL; |
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
6068 | if (rc) |
6069 | goto err_out; |
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6073 | } |
6074 | } |
6075 | |
6076 | if (agg_rings) { |
6077 | type = HWRM_RING_ALLOC_AGG; |
6078 | for (i = 0; i < bp->rx_nr_rings; i++) { |
6079 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
6080 | struct bnxt_ring_struct *ring = |
6081 | &rxr->rx_agg_ring_struct; |
6082 | u32 grp_idx = ring->grp_idx; |
6083 | u32 map_idx = grp_idx + bp->rx_nr_rings; |
6084 | |
			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6086 | if (rc) |
6087 | goto err_out; |
6088 | |
			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6093 | bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; |
6094 | } |
6095 | } |
6096 | err_out: |
6097 | return rc; |
6098 | } |
6099 | |
6100 | static int hwrm_ring_free_send_msg(struct bnxt *bp, |
6101 | struct bnxt_ring_struct *ring, |
6102 | u32 ring_type, int cmpl_ring_id) |
6103 | { |
6104 | struct hwrm_ring_free_output *resp; |
6105 | struct hwrm_ring_free_input *req; |
6106 | u16 error_code = 0; |
6107 | int rc; |
6108 | |
6109 | if (BNXT_NO_FW_ACCESS(bp)) |
6110 | return 0; |
6111 | |
6112 | rc = hwrm_req_init(bp, req, HWRM_RING_FREE); |
6113 | if (rc) |
6114 | goto exit; |
6115 | |
6116 | req->cmpl_ring = cpu_to_le16(cmpl_ring_id); |
6117 | req->ring_type = ring_type; |
6118 | req->ring_id = cpu_to_le16(ring->fw_ring_id); |
6119 | |
6120 | resp = hwrm_req_hold(bp, req); |
6121 | rc = hwrm_req_send(bp, req); |
6122 | error_code = le16_to_cpu(resp->error_code); |
6123 | hwrm_req_drop(bp, req); |
6124 | exit: |
6125 | if (rc || error_code) { |
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
6128 | return -EIO; |
6129 | } |
6130 | return 0; |
6131 | } |
6132 | |
6133 | static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) |
6134 | { |
6135 | u32 type; |
6136 | int i; |
6137 | |
6138 | if (!bp->bnapi) |
6139 | return; |
6140 | |
6141 | for (i = 0; i < bp->tx_nr_rings; i++) { |
6142 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; |
6143 | struct bnxt_ring_struct *ring = &txr->tx_ring_struct; |
6144 | |
6145 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
6146 | u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); |
6147 | |
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
6152 | ring->fw_ring_id = INVALID_HW_RING_ID; |
6153 | } |
6154 | } |
6155 | |
6156 | for (i = 0; i < bp->rx_nr_rings; i++) { |
6157 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
6158 | struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; |
6159 | u32 grp_idx = rxr->bnapi->index; |
6160 | |
6161 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
6162 | u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
6163 | |
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
6168 | ring->fw_ring_id = INVALID_HW_RING_ID; |
6169 | bp->grp_info[grp_idx].rx_fw_ring_id = |
6170 | INVALID_HW_RING_ID; |
6171 | } |
6172 | } |
6173 | |
6174 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
6175 | type = RING_FREE_REQ_RING_TYPE_RX_AGG; |
6176 | else |
6177 | type = RING_FREE_REQ_RING_TYPE_RX; |
6178 | for (i = 0; i < bp->rx_nr_rings; i++) { |
6179 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
6180 | struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; |
6181 | u32 grp_idx = rxr->bnapi->index; |
6182 | |
6183 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
6184 | u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); |
6185 | |
			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
6189 | ring->fw_ring_id = INVALID_HW_RING_ID; |
6190 | bp->grp_info[grp_idx].agg_fw_ring_id = |
6191 | INVALID_HW_RING_ID; |
6192 | } |
6193 | } |
6194 | |
6195 | /* The completion rings are about to be freed. After that the |
6196 | * IRQ doorbell will not work anymore. So we need to disable |
6197 | * IRQ here. |
6198 | */ |
6199 | bnxt_disable_int_sync(bp); |
6200 | |
6201 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
6202 | type = RING_FREE_REQ_RING_TYPE_NQ; |
6203 | else |
6204 | type = RING_FREE_REQ_RING_TYPE_L2_CMPL; |
6205 | for (i = 0; i < bp->cp_nr_rings; i++) { |
6206 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
6207 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
6208 | struct bnxt_ring_struct *ring; |
6209 | int j; |
6210 | |
6211 | for (j = 0; j < 2; j++) { |
6212 | struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
6213 | |
6214 | if (cpr2) { |
6215 | ring = &cpr2->cp_ring_struct; |
6216 | if (ring->fw_ring_id == INVALID_HW_RING_ID) |
6217 | continue; |
6218 | hwrm_ring_free_send_msg(bp, ring, |
6219 | RING_FREE_REQ_RING_TYPE_L2_CMPL, |
6220 | INVALID_HW_RING_ID); |
6221 | ring->fw_ring_id = INVALID_HW_RING_ID; |
6222 | } |
6223 | } |
6224 | ring = &cpr->cp_ring_struct; |
6225 | if (ring->fw_ring_id != INVALID_HW_RING_ID) { |
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
6228 | ring->fw_ring_id = INVALID_HW_RING_ID; |
6229 | bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; |
6230 | } |
6231 | } |
6232 | } |
6233 | |
6234 | static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
6235 | bool shared); |
6236 | |
6237 | static int bnxt_hwrm_get_rings(struct bnxt *bp) |
6238 | { |
6239 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
6240 | struct hwrm_func_qcfg_output *resp; |
6241 | struct hwrm_func_qcfg_input *req; |
6242 | int rc; |
6243 | |
6244 | if (bp->hwrm_spec_code < 0x10601) |
6245 | return 0; |
6246 | |
6247 | rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); |
6248 | if (rc) |
6249 | return rc; |
6250 | |
6251 | req->fid = cpu_to_le16(0xffff); |
6252 | resp = hwrm_req_hold(bp, req); |
6253 | rc = hwrm_req_send(bp, req); |
6254 | if (rc) { |
6255 | hwrm_req_drop(bp, req); |
6256 | return rc; |
6257 | } |
6258 | |
6259 | hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); |
6260 | if (BNXT_NEW_RM(bp)) { |
6261 | u16 cp, stats; |
6262 | |
6263 | hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); |
6264 | hw_resc->resv_hw_ring_grps = |
6265 | le32_to_cpu(resp->alloc_hw_ring_grps); |
6266 | hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); |
6267 | cp = le16_to_cpu(resp->alloc_cmpl_rings); |
6268 | stats = le16_to_cpu(resp->alloc_stat_ctx); |
6269 | hw_resc->resv_irqs = cp; |
6270 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6271 | int rx = hw_resc->resv_rx_rings; |
6272 | int tx = hw_resc->resv_tx_rings; |
6273 | |
6274 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
6275 | rx >>= 1; |
6276 | if (cp < (rx + tx)) { |
				bnxt_trim_rings(bp, &rx, &tx, cp, false);
6278 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
6279 | rx <<= 1; |
6280 | hw_resc->resv_rx_rings = rx; |
6281 | hw_resc->resv_tx_rings = tx; |
6282 | } |
6283 | hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); |
6284 | hw_resc->resv_hw_ring_grps = rx; |
6285 | } |
6286 | hw_resc->resv_cp_rings = cp; |
6287 | hw_resc->resv_stat_ctxs = stats; |
6288 | } |
6289 | hwrm_req_drop(bp, req); |
6290 | return 0; |
6291 | } |
6292 | |
6293 | int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) |
6294 | { |
6295 | struct hwrm_func_qcfg_output *resp; |
6296 | struct hwrm_func_qcfg_input *req; |
6297 | int rc; |
6298 | |
6299 | if (bp->hwrm_spec_code < 0x10601) |
6300 | return 0; |
6301 | |
6302 | rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); |
6303 | if (rc) |
6304 | return rc; |
6305 | |
6306 | req->fid = cpu_to_le16(fid); |
6307 | resp = hwrm_req_hold(bp, req); |
6308 | rc = hwrm_req_send(bp, req); |
6309 | if (!rc) |
6310 | *tx_rings = le16_to_cpu(resp->alloc_tx_rings); |
6311 | |
6312 | hwrm_req_drop(bp, req); |
6313 | return rc; |
6314 | } |
6315 | |
6316 | static bool bnxt_rfs_supported(struct bnxt *bp); |
6317 | |
6318 | static struct hwrm_func_cfg_input * |
6319 | __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
6320 | int ring_grps, int cp_rings, int stats, int vnics) |
6321 | { |
6322 | struct hwrm_func_cfg_input *req; |
6323 | u32 enables = 0; |
6324 | |
	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
6326 | return NULL; |
6327 | |
6328 | req->fid = cpu_to_le16(0xffff); |
6329 | enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; |
6330 | req->num_tx_rings = cpu_to_le16(tx_rings); |
6331 | if (BNXT_NEW_RM(bp)) { |
6332 | enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; |
6333 | enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
6334 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6335 | enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; |
6336 | enables |= tx_rings + ring_grps ? |
6337 | FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
6338 | enables |= rx_rings ? |
6339 | FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
6340 | } else { |
6341 | enables |= cp_rings ? |
6342 | FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
6343 | enables |= ring_grps ? |
6344 | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | |
6345 | FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
6346 | } |
6347 | enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; |
6348 | |
6349 | req->num_rx_rings = cpu_to_le16(rx_rings); |
6350 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6351 | req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); |
6352 | req->num_msix = cpu_to_le16(cp_rings); |
6353 | req->num_rsscos_ctxs = |
6354 | cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); |
6355 | } else { |
6356 | req->num_cmpl_rings = cpu_to_le16(cp_rings); |
6357 | req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
6358 | req->num_rsscos_ctxs = cpu_to_le16(1); |
6359 | if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && |
6360 | bnxt_rfs_supported(bp)) |
6361 | req->num_rsscos_ctxs = |
6362 | cpu_to_le16(ring_grps + 1); |
6363 | } |
6364 | req->num_stat_ctxs = cpu_to_le16(stats); |
6365 | req->num_vnics = cpu_to_le16(vnics); |
6366 | } |
6367 | req->enables = cpu_to_le32(enables); |
6368 | return req; |
6369 | } |
6370 | |
6371 | static struct hwrm_func_vf_cfg_input * |
6372 | __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
6373 | int ring_grps, int cp_rings, int stats, int vnics) |
6374 | { |
6375 | struct hwrm_func_vf_cfg_input *req; |
6376 | u32 enables = 0; |
6377 | |
6378 | if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) |
6379 | return NULL; |
6380 | |
6381 | enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; |
6382 | enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | |
6383 | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; |
6384 | enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; |
6385 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6386 | enables |= tx_rings + ring_grps ? |
6387 | FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
6388 | } else { |
6389 | enables |= cp_rings ? |
6390 | FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; |
6391 | enables |= ring_grps ? |
6392 | FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; |
6393 | } |
6394 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; |
6395 | enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; |
6396 | |
6397 | req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); |
6398 | req->num_tx_rings = cpu_to_le16(tx_rings); |
6399 | req->num_rx_rings = cpu_to_le16(rx_rings); |
6400 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
6401 | req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); |
6402 | req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); |
6403 | } else { |
6404 | req->num_cmpl_rings = cpu_to_le16(cp_rings); |
6405 | req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
6406 | req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); |
6407 | } |
6408 | req->num_stat_ctxs = cpu_to_le16(stats); |
6409 | req->num_vnics = cpu_to_le16(vnics); |
6410 | |
6411 | req->enables = cpu_to_le32(enables); |
6412 | return req; |
6413 | } |
6414 | |
6415 | static int |
6416 | bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
6417 | int ring_grps, int cp_rings, int stats, int vnics) |
6418 | { |
6419 | struct hwrm_func_cfg_input *req; |
6420 | int rc; |
6421 | |
6422 | req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, |
6423 | cp_rings, stats, vnics); |
6424 | if (!req) |
6425 | return -ENOMEM; |
6426 | |
6427 | if (!req->enables) { |
6428 | hwrm_req_drop(bp, req); |
6429 | return 0; |
6430 | } |
6431 | |
6432 | rc = hwrm_req_send(bp, req); |
6433 | if (rc) |
6434 | return rc; |
6435 | |
6436 | if (bp->hwrm_spec_code < 0x10601) |
6437 | bp->hw_resc.resv_tx_rings = tx_rings; |
6438 | |
6439 | return bnxt_hwrm_get_rings(bp); |
6440 | } |
6441 | |
6442 | static int |
6443 | bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
6444 | int ring_grps, int cp_rings, int stats, int vnics) |
6445 | { |
6446 | struct hwrm_func_vf_cfg_input *req; |
6447 | int rc; |
6448 | |
6449 | if (!BNXT_NEW_RM(bp)) { |
6450 | bp->hw_resc.resv_tx_rings = tx_rings; |
6451 | return 0; |
6452 | } |
6453 | |
6454 | req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, |
6455 | cp_rings, stats, vnics); |
6456 | if (!req) |
6457 | return -ENOMEM; |
6458 | |
6459 | rc = hwrm_req_send(bp, req); |
6460 | if (rc) |
6461 | return rc; |
6462 | |
6463 | return bnxt_hwrm_get_rings(bp); |
6464 | } |
6465 | |
6466 | static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, |
6467 | int cp, int stat, int vnic) |
6468 | { |
6469 | if (BNXT_PF(bp)) |
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
						  vnic);
6475 | } |
6476 | |
6477 | int bnxt_nq_rings_in_use(struct bnxt *bp) |
6478 | { |
6479 | int cp = bp->cp_nr_rings; |
6480 | int ulp_msix, ulp_base; |
6481 | |
6482 | ulp_msix = bnxt_get_ulp_msix_num(bp); |
6483 | if (ulp_msix) { |
6484 | ulp_base = bnxt_get_ulp_msix_base(bp); |
6485 | cp += ulp_msix; |
6486 | if ((ulp_base + ulp_msix) > cp) |
6487 | cp = ulp_base + ulp_msix; |
6488 | } |
6489 | return cp; |
6490 | } |
6491 | |
6492 | static int bnxt_cp_rings_in_use(struct bnxt *bp) |
6493 | { |
6494 | int cp; |
6495 | |
6496 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
6497 | return bnxt_nq_rings_in_use(bp); |
6498 | |
6499 | cp = bp->tx_nr_rings + bp->rx_nr_rings; |
6500 | return cp; |
6501 | } |
6502 | |
6503 | static int bnxt_get_func_stat_ctxs(struct bnxt *bp) |
6504 | { |
6505 | int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); |
6506 | int cp = bp->cp_nr_rings; |
6507 | |
6508 | if (!ulp_stat) |
6509 | return cp; |
6510 | |
6511 | if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) |
6512 | return bnxt_get_ulp_msix_base(bp) + ulp_stat; |
6513 | |
6514 | return cp + ulp_stat; |
6515 | } |
6516 | |
6517 | /* Check if a default RSS map needs to be setup. This function is only |
6518 | * used on older firmware that does not require reserving RX rings. |
6519 | */ |
static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6521 | { |
6522 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
6523 | |
6524 | /* The RSS map is valid for RX rings set to resv_rx_rings */ |
6525 | if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { |
6526 | hw_resc->resv_rx_rings = bp->rx_nr_rings; |
		if (!netif_is_rxfh_configured(bp->dev))
6528 | bnxt_set_dflt_rss_indir_tbl(bp); |
6529 | } |
6530 | } |
6531 | |
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int nq = bnxt_nq_rings_in_use(bp);
	int rx = bp->rx_nr_rings, stat;
	int vnic = 1, grp = rx;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
	    bp->hwrm_spec_code >= 0x10601)
		return true;

	/* Old firmware does not need RX ring reservations but we still
	 * need to setup a default RSS map when needed. With new firmware
	 * we go through RX ring reservations first and then set up the
	 * RSS map for the successfully reserved RX rings when needed.
	 */
	if (!BNXT_NEW_RM(bp)) {
		bnxt_check_rss_tbl_no_rmgr(bp);
		return false;
	}
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	stat = bnxt_get_func_stat_ctxs(bp);
	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
	    (hw_resc->resv_hw_ring_grps != grp &&
	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
		return true;
	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
	    hw_resc->resv_irqs != nq)
		return true;
	return false;
}

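/* Reserve rings and related resources with the firmware, then trim the
 * driver's ring counts down to what was actually granted.
 */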
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_nq_rings_in_use(bp);
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int grp, rx_rings, rc;
	int vnic = 1, stat;
	bool sh = false;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	grp = bp->rx_nr_rings;
	stat = bnxt_get_func_stat_ctxs(bp);

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_irqs;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
		stat = hw_resc->resv_stat_ctxs;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	cp = min_t(int, cp, bp->cp_nr_rings);
	if (stat > bnxt_get_ulp_stat_ctxs(bp))
		stat -= bnxt_get_ulp_stat_ctxs(bp);
	cp = min_t(int, cp, stat);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;

	/* If we cannot reserve all the RX rings, reset the RSS map only
	 * if absolutely necessary
	 */
	if (rx_rings != bp->rx_nr_rings) {
		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
			    rx_rings, bp->rx_nr_rings);
		if (netif_is_rxfh_configured(bp->dev) &&
		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		}
	}
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic || !stat)
		return -ENOMEM;

	if (!netif_is_rxfh_configured(bp->dev))
		bnxt_set_dflt_rss_indir_tbl(bp);

	return rc;
}

static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_vf_cfg_input *req;
	u32 flags;

	if (!BNXT_NEW_RM(bp))
		return 0;

	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					   cp_rings, stats, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req->flags = cpu_to_le32(flags);
	return hwrm_req_send_silent(bp, req);
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int stats,
				    int vnics)
{
	struct hwrm_func_cfg_input *req;
	u32 flags;

	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
					   cp_rings, stats, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req->flags = cpu_to_le32(flags);
	return hwrm_req_send_silent(bp, req);
}

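/* Ask the firmware whether the requested resources are available without
 * actually committing the reservation (the *_ASSETS_TEST flags above).
 */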
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int stats,
				 int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, stats,
						vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, stats, vnics);
}

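/* Query the interrupt coalescing capabilities. Conservative defaults
 * matching legacy hardware are used if the firmware predates
 * HWRM_RING_AGGINT_QCAPS.
 */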
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_output *resp;
	struct hwrm_ring_aggint_qcaps_input *req;
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	hwrm_req_drop(bp, req);
}

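/* Convert microseconds to hardware coalescing timer ticks; timer_units
 * is the tick period in nanoseconds (80 ns unless the firmware reports
 * otherwise).
 */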
static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
				      struct bnxt_coal *hw_coal,
				      struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u16 val, tmr, max, flags = hw_coal->flags;
	u32 cmpl_params = coal_cap->cmpl_params;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

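/* Program the NQ minimum interrupt latency timer, set to half of the
 * ring's coalescing ticks. Only called on P5 chips, where the NQ has its
 * own timer.
 */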
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;
	int rc;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req->flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req->int_lat_tmr_min = cpu_to_le16(tmr);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return hwrm_req_send(bp, req);
}

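/* Apply per-ring RX coalescing settings to the completion ring that
 * services bnapi's RX ring.
 */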
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;
	int rc;

	/* Tick values in micro seconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);

	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_req_send(bp, req_rx);
}

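/* Program the global RX and TX coalescing settings on every completion
 * ring, plus the NQ timers on P5 chips.
 */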
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
							   *req;
	int i, rc;

	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;

	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc) {
		hwrm_req_drop(bp, req_rx);
		return rc;
	}

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);

	hwrm_req_hold(bp, req_rx);
	hwrm_req_hold(bp, req_tx);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = hwrm_req_send(bp, req);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = hwrm_req_send(bp, req);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	hwrm_req_drop(bp, req_rx);
	hwrm_req_drop(bp, req_tx);
	return rc;
}

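/* Free all statistics contexts. For firmware with major version 20 or
 * older, an explicit HWRM_STAT_CTX_CLR_STATS is also sent before each
 * free.
 */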
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
	struct hwrm_stat_ctx_free_input *req;
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
		return;
	if (BNXT_FW_MAJ(bp) <= 20) {
		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
			hwrm_req_drop(bp, req);
			return;
		}
		hwrm_req_hold(bp, req0);
	}
	hwrm_req_hold(bp, req);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (req0) {
				req0->stat_ctx_id = req->stat_ctx_id;
				hwrm_req_send(bp, req0);
			}
			hwrm_req_send(bp, req);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	hwrm_req_drop(bp, req);
	if (req0)
		hwrm_req_drop(bp, req0);
}

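/* Allocate a firmware statistics context for each completion ring and
 * record the returned context IDs.
 */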
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	struct hwrm_stat_ctx_alloc_output *resp;
	struct hwrm_stat_ctx_alloc_input *req;
	int rc, i;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
	if (rc)
		return rc;

	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	resp = hwrm_req_hold(bp, req);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = hwrm_req_send(bp, req);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

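/* Query the current function configuration: VF VLAN, firmware agent
 * capabilities, port partition type, bridge mode, maximum MTU and the
 * doorbell BAR size.
 */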
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	u32 min_db_offset = 0;
	u16 flags;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			min_db_offset = DB_PF_OFFSET_P5;
		else
			min_db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= min_db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

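/* Record the value and per-type offset the firmware uses to initialize
 * each context memory region, along with the region entry sizes.
 */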
static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
				      struct hwrm_func_backing_store_qcaps_output *resp)
{
	struct bnxt_mem_init *mem_init;
	u16 init_mask;
	u8 init_val;
	u8 *offset;
	int i;

	init_val = resp->ctx_kind_initializer;
	init_mask = le16_to_cpu(resp->ctx_init_mask);
	offset = &resp->qp_init_offset;
	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
		mem_init->init_val = init_val;
		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
		if (!init_mask)
			continue;
		if (i == BNXT_CTX_MEM_INIT_STAT)
			offset = &resp->stat_init_offset;
		if (init_mask & (1 << i))
			mem_init->offset = *offset * 4;
		else
			mem_init->init_val = 0;
	}
	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
}

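/* Query the backing store (context memory) requirements from the firmware
 * and allocate the bnxt_ctx_mem_info structure that describes them.
 */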
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_output *resp;
	struct hwrm_func_backing_store_qcaps_input *req;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int i, tqm_rings;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->mrav_num_entries_units =
			le16_to_cpu(resp->mrav_num_entries_units);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);

		bnxt_init_ctx_initializer(ctx, resp);

		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
		if (!ctx->tqm_fp_rings_count)
			ctx->tqm_fp_rings_count = bp->max_q;
		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;

		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
		if (!ctx_pg) {
			kfree(ctx);
			rc = -ENOMEM;
			goto ctx_err;
		}
		for (i = 0; i < tqm_rings; i++, ctx_pg++)
			ctx->tqm_mem[i] = ctx_pg;
		bp->ctx = ctx;
	} else {
		rc = 0;
	}
ctx_err:
	hwrm_req_drop(bp, req);
	return rc;
}

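/* Encode the page size and indirection level into *pg_attr and point
 * *pg_dir at the page table (or at the lone data page when there is no
 * indirection).
 */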
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	if (!rmem->nr_pages)
		return;

	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)

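/* Tell the firmware where each enabled backing store region lives: entry
 * counts, entry sizes and page directory addresses.
 */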
static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
	struct hwrm_func_backing_store_cfg_input *req;
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	void **__req = (void **)&req;
	u32 req_len = sizeof(*req);
	__le32 *num_entries;
	__le64 *pg_dir;
	u32 flags = 0;
	u8 *pg_attr;
	u32 ena;
	int rc;
	int i;

	if (!ctx)
		return 0;

	if (req_len > bp->hwrm_max_ext_req_len)
		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(enables);
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->qpc_pg_size_qpc_lvl,
				      &req->qpc_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->srq_pg_size_srq_lvl,
				      &req->srq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->cq_pg_size_cq_lvl,
				      &req->cq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req->vnic_num_vnic_entries =
			cpu_to_le16(ctx->vnic_max_vnic_entries);
		req->vnic_num_ring_table_entries =
			cpu_to_le16(ctx->vnic_max_ring_table_entries);
		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->vnic_pg_size_vnic_lvl,
				      &req->vnic_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->stat_pg_size_stat_lvl,
				      &req->stat_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
		ctx_pg = &ctx->mrav_mem;
		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
		if (ctx->mrav_num_entries_units)
			flags |=
				FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->mrav_pg_size_mrav_lvl,
				      &req->mrav_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
		ctx_pg = &ctx->tim_mem;
		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req->tim_pg_size_tim_lvl,
				      &req->tim_page_dir);
	}
	for (i = 0, num_entries = &req->tqm_sp_num_entries,
	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
	     pg_dir = &req->tqm_sp_page_dir,
	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
	     i < BNXT_MAX_TQM_RINGS;
	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = cpu_to_le32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}
	req->flags = cpu_to_le32(flags);
	return hwrm_req_send(bp, req);
}

static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_pg_arr;
	rmem->dma_arr = ctx_pg->ctx_dma_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
	return bnxt_alloc_ring(bp, rmem);
}

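/* Allocate mem_size bytes of context memory, adding a second level of
 * page table indirection when the region does not fit under a single
 * level of page tables.
 */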
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_mem_init *mem_init)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->mem_init = mem_init;
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->mem_init = mem_init;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
	}
	return rc;
}

static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(bp, rmem2);
			ctx_pg->ctx_pg_arr[i] = NULL;
			kfree(pg_tbl);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(bp, rmem);
	ctx_pg->nr_pages = 0;
}

void bnxt_free_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	int i;

	if (!ctx)
		return;

	if (ctx->tqm_mem[0]) {
		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
		kfree(ctx->tqm_mem[0]);
		ctx->tqm_mem[0] = NULL;
	}

	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}

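/* Size and allocate all context (backing store) memory regions required
 * by the firmware, then configure them with FUNC_BACKING_STORE_CFG.
 */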
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	struct bnxt_mem_init *init;
	u32 mem_size, ena, entries;
	u32 entries_sp, min;
	u32 num_mr, num_ah;
	u32 extra_qps = 0;
	u32 extra_srqs = 0;
	u8 pg_lvl = 1;
	int i, rc;

	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = bp->ctx;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
		pg_lvl = 2;
		extra_qps = 65536;
		extra_srqs = 8192;
	}

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  extra_qps;
	if (ctx->qp_entry_size) {
		mem_size = ctx->qp_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->srq_mem;
	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
	if (ctx->srq_entry_size) {
		mem_size = ctx->srq_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->cq_mem;
	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
	if (ctx->cq_entry_size) {
		mem_size = ctx->cq_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	if (ctx->vnic_entry_size) {
		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
		if (rc)
			return rc;
	}

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	if (ctx->stat_entry_size) {
		mem_size = ctx->stat_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
		if (rc)
			return rc;
	}

	ena = 0;
	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctx_pg = &ctx->mrav_mem;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = 1024 * 256;
	num_ah = 1024 * 128;
	ctx_pg->entries = num_mr + num_ah;
	if (ctx->mrav_entry_size) {
		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
		if (rc)
			return rc;
	}
	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
	if (ctx->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctx->mrav_num_entries_units) << 16) |
			(num_ah / ctx->mrav_num_entries_units);

	ctx_pg = &ctx->tim_mem;
	ctx_pg->entries = ctx->qp_mem.entries;
	if (ctx->tim_entry_size) {
		mem_size = ctx->tim_entry_size * ctx_pg->entries;
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
		if (rc)
			return rc;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;

skip_rdma:
	min = ctx->tqm_min_entries_per_ring;
	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = i ? entries : entries_sp;
		if (ctx->tqm_entry_size) {
			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
						    NULL);
			if (rc)
				return rc;
		}
		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;
	return 0;
}

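/* Query the min/max resource counts (rings, VNICs, stat contexts, etc.)
 * available to this function under the new resource manager.
 */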
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
	struct hwrm_func_resource_qcaps_output *resp;
	struct hwrm_func_resource_qcaps_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		u16 max_msix = le16_to_cpu(resp->max_msix);

		hw_resc->max_nqs = max_msix;
		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
	}

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

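/* Query the port's PTP configuration and, when the PHC is accessible,
 * allocate and initialize the driver's PTP state.
 */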
static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	struct hwrm_port_mac_ptp_qcfg_output *resp;
	struct hwrm_port_mac_ptp_qcfg_input *req;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	bool phc_cfg;
	u8 flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
		rc = -ENODEV;
		goto no_ptp;
	}

	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
	if (rc)
		goto no_ptp;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto exit;

	flags = resp->flags;
	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
		rc = -ENODEV;
		goto exit;
	}
	if (!ptp) {
		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
		if (!ptp) {
			rc = -ENOMEM;
			goto exit;
		}
		ptp->bp = bp;
		bp->ptp_cfg = ptp;
	}
	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
	} else if (bp->flags & BNXT_FLAG_CHIP_P5) {
		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
	} else {
		rc = -ENODEV;
		goto exit;
	}
	phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
	rc = bnxt_ptp_init(bp, phc_cfg);
	if (rc)
		netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
	hwrm_req_drop(bp, req);
	if (!rc)
		return 0;

no_ptp:
	bnxt_ptp_clear(bp);
	kfree(ptp);
	bp->ptp_cfg = NULL;
	return rc;
}

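/* Query the function's capabilities and record them in bp->flags,
 * bp->fw_cap and the hardware resource limits.
 */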
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags, flags_ext, flags_ext2;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
	if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;

	flags_ext = le32_to_cpu(resp->flags_ext);
	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;

	flags_ext2 = le32_to_cpu(resp->flags_ext2);
	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;

	bp->tx_push_thresh = 0;
	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
	    BNXT_FW_MAJ(bp) > 217)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		bp->flags &= ~BNXT_FLAG_WOL_CAP;
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
			bp->fw_cap |= BNXT_FW_CAP_PTP;
		} else {
			bnxt_ptp_clear(bp);
			kfree(bp->ptp_cfg);
			bp->ptp_cfg = NULL;
		}
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
{
	struct hwrm_dbg_qcaps_output *resp;
	struct hwrm_dbg_qcaps_input *req;
	int rc;

	bp->fw_dbg_cap = 0;
	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
		return;

	rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
	if (rc)
		return;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto hwrm_dbg_qcaps_exit;

	bp->fw_dbg_cap = le32_to_cpu(resp->flags);

hwrm_dbg_qcaps_exit:
	hwrm_req_drop(bp, req);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bnxt_hwrm_dbg_qcaps(bp);

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}

static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
	u32 flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;

hwrm_cfa_adv_qcaps_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static int __bnxt_alloc_fw_health(struct bnxt *bp)
{
	if (bp->fw_health)
		return 0;

	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
	if (!bp->fw_health)
		return -ENOMEM;

	mutex_init(&bp->fw_health->lock);
	return 0;
}

static int bnxt_alloc_fw_health(struct bnxt *bp)
{
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	rc = __bnxt_alloc_fw_health(bp);
	if (rc) {
		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
		return rc;
	}

	return 0;
}

static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
{
	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
	       BNXT_GRCPF_REG_WINDOW_BASE_OUT +
	       BNXT_FW_HEALTH_WIN_MAP_OFF);
}

static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_type;

	if (!fw_health)
		return;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
		fw_health->status_reliable = false;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
		fw_health->resets_reliable = false;
}

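/* Locate the firmware health status register, either through the hcomm
 * status structure or at the fixed P5 GRC location, and map it through
 * the GRC window.
 */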
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
	void __iomem *hs;
	u32 status_loc;
	u32 reg_type;
	u32 sig;

	if (bp->fw_health)
		bp->fw_health->status_reliable = false;

	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);

	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
		if (!bp->chip_num) {
			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
			bp->chip_num = readl(bp->bar0 +
					     BNXT_FW_HEALTH_WIN_BASE +
					     BNXT_GRC_REG_CHIP_NUM);
		}
		if (!BNXT_CHIP_P5(bp))
			return;

		status_loc = BNXT_GRC_REG_STATUS_P5 |
			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
	} else {
		status_loc = readl(hs + offsetof(struct hcomm_status,
						 fw_status_loc));
	}

	if (__bnxt_alloc_fw_health(bp)) {
		netdev_warn(bp->dev, "no memory for firmware status checks\n");
		return;
	}

	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
		__bnxt_map_fw_health_reg(bp, status_loc);
		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
			BNXT_FW_HEALTH_WIN_OFF(status_loc);
	}

	bp->fw_health->status_reliable = true;
}

static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	bp->fw_health->resets_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	bp->fw_health->resets_reliable = true;
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}

static void bnxt_remap_fw_health_regs(struct bnxt *bp)
{
	if (!bp->fw_health)
		return;

	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
		bp->fw_health->status_reliable = true;
		bp->fw_health->resets_reliable = true;
	} else {
		bnxt_try_map_fw_health_reg(bp);
	}
}

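/* Query the error recovery configuration: polling intervals, wait times
 * and the register write sequence used to reset the chip.
 */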
8089 | static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) |
8090 | { |
8091 | struct bnxt_fw_health *fw_health = bp->fw_health; |
8092 | struct hwrm_error_recovery_qcfg_output *resp; |
8093 | struct hwrm_error_recovery_qcfg_input *req; |
8094 | int rc, i; |
8095 | |
8096 | if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) |
8097 | return 0; |
8098 | |
8099 | rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); |
8100 | if (rc) |
8101 | return rc; |
8102 | |
8103 | resp = hwrm_req_hold(bp, req); |
8104 | rc = hwrm_req_send(bp, req); |
8105 | if (rc) |
8106 | goto err_recovery_out; |
8107 | fw_health->flags = le32_to_cpu(resp->flags); |
8108 | if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && |
8109 | !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { |
8110 | rc = -EINVAL; |
8111 | goto err_recovery_out; |
8112 | } |
8113 | fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); |
8114 | fw_health->master_func_wait_dsecs = |
8115 | le32_to_cpu(resp->master_func_wait_period); |
8116 | fw_health->normal_func_wait_dsecs = |
8117 | le32_to_cpu(resp->normal_func_wait_period); |
8118 | fw_health->post_reset_wait_dsecs = |
8119 | le32_to_cpu(resp->master_func_wait_period_after_reset); |
8120 | fw_health->post_reset_max_wait_dsecs = |
8121 | le32_to_cpu(resp->max_bailout_time_after_reset); |
8122 | fw_health->regs[BNXT_FW_HEALTH_REG] = |
8123 | le32_to_cpu(resp->fw_health_status_reg); |
8124 | fw_health->regs[BNXT_FW_HEARTBEAT_REG] = |
8125 | le32_to_cpu(resp->fw_heartbeat_reg); |
8126 | fw_health->regs[BNXT_FW_RESET_CNT_REG] = |
8127 | le32_to_cpu(resp->fw_reset_cnt_reg); |
8128 | fw_health->regs[BNXT_FW_RESET_INPROG_REG] = |
8129 | le32_to_cpu(resp->reset_inprogress_reg); |
8130 | fw_health->fw_reset_inprog_reg_mask = |
8131 | le32_to_cpu(resp->reset_inprogress_reg_mask); |
8132 | fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; |
8133 | if (fw_health->fw_reset_seq_cnt >= 16) { |
8134 | rc = -EINVAL; |
8135 | goto err_recovery_out; |
8136 | } |
8137 | for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { |
8138 | fw_health->fw_reset_seq_regs[i] = |
8139 | le32_to_cpu(resp->reset_reg[i]); |
8140 | fw_health->fw_reset_seq_vals[i] = |
8141 | le32_to_cpu(resp->reset_reg_val[i]); |
8142 | fw_health->fw_reset_seq_delay_msec[i] = |
8143 | resp->delay_after_reset[i]; |
8144 | } |
8145 | err_recovery_out: |
8146 | hwrm_req_drop(bp, req); |
8147 | if (!rc) |
8148 | rc = bnxt_map_fw_health_regs(bp); |
8149 | if (rc) |
8150 | bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; |
8151 | return rc; |
8152 | } |
8153 | |
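/* Reset the function's firmware state with HWRM_FUNC_RESET.  The
 * reset can take longer than a normal command, so the dedicated
 * HWRM_RESET_TIMEOUT is used instead of the default request timeout.
 */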
8154 | static int bnxt_hwrm_func_reset(struct bnxt *bp) |
8155 | { |
8156 | struct hwrm_func_reset_input *req; |
8157 | int rc; |
8158 | |
8159 | rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); |
8160 | if (rc) |
8161 | return rc; |
8162 | |
8163 | req->enables = 0; |
8164 | hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); |
8165 | return hwrm_req_send(bp, req); |
8166 | } |
8167 | |
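/* Cache the NVM configuration version (maj.min.upd) reported by the
 * device, presumably for later reporting to user space.
 */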
8168 | static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) |
8169 | { |
8170 | struct hwrm_nvm_get_dev_info_output nvm_info; |
8171 | |
if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8174 | nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, |
8175 | nvm_info.nvm_cfg_ver_upd); |
8176 | } |
8177 | |
8178 | static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) |
8179 | { |
8180 | struct hwrm_queue_qportcfg_output *resp; |
8181 | struct hwrm_queue_qportcfg_input *req; |
8182 | u8 i, j, *qptr; |
8183 | bool no_rdma; |
8184 | int rc = 0; |
8185 | |
8186 | rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); |
8187 | if (rc) |
8188 | return rc; |
8189 | |
8190 | resp = hwrm_req_hold(bp, req); |
8191 | rc = hwrm_req_send(bp, req); |
8192 | if (rc) |
8193 | goto qportcfg_exit; |
8194 | |
8195 | if (!resp->max_configurable_queues) { |
8196 | rc = -EINVAL; |
8197 | goto qportcfg_exit; |
8198 | } |
8199 | bp->max_tc = resp->max_configurable_queues; |
8200 | bp->max_lltc = resp->max_configurable_lossless_queues; |
8201 | if (bp->max_tc > BNXT_MAX_QUEUE) |
8202 | bp->max_tc = BNXT_MAX_QUEUE; |
8203 | |
8204 | no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); |
8205 | qptr = &resp->queue_id0; |
8206 | for (i = 0, j = 0; i < bp->max_tc; i++) { |
8207 | bp->q_info[j].queue_id = *qptr; |
8208 | bp->q_ids[i] = *qptr++; |
8209 | bp->q_info[j].queue_profile = *qptr++; |
8210 | bp->tc_to_qidx[j] = j; |
8211 | if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || |
8212 | (no_rdma && BNXT_PF(bp))) |
8213 | j++; |
8214 | } |
8215 | bp->max_q = bp->max_tc; |
8216 | bp->max_tc = max_t(u8, j, 1); |
8217 | |
8218 | if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) |
8219 | bp->max_tc = 1; |
8220 | |
8221 | if (bp->max_lltc > bp->max_tc) |
8222 | bp->max_lltc = bp->max_tc; |
8223 | |
8224 | qportcfg_exit: |
8225 | hwrm_req_drop(bp, req); |
8226 | return rc; |
8227 | } |
8228 | |
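/* Lightweight firmware liveness probe: a HWRM_VER_GET sent with the
 * SILENT flag (failures are expected and not logged) and FULL_WAIT
 * (poll for the full timeout), e.g. while waiting for firmware to
 * come back after a reset.
 */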
8229 | static int bnxt_hwrm_poll(struct bnxt *bp) |
8230 | { |
8231 | struct hwrm_ver_get_input *req; |
8232 | int rc; |
8233 | |
8234 | rc = hwrm_req_init(bp, req, HWRM_VER_GET); |
8235 | if (rc) |
8236 | return rc; |
8237 | |
8238 | req->hwrm_intf_maj = HWRM_VERSION_MAJOR; |
8239 | req->hwrm_intf_min = HWRM_VERSION_MINOR; |
8240 | req->hwrm_intf_upd = HWRM_VERSION_UPDATE; |
8241 | |
hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8243 | rc = hwrm_req_send(bp, req); |
8244 | return rc; |
8245 | } |
8246 | |
8247 | static int bnxt_hwrm_ver_get(struct bnxt *bp) |
8248 | { |
8249 | struct hwrm_ver_get_output *resp; |
8250 | struct hwrm_ver_get_input *req; |
8251 | u16 fw_maj, fw_min, fw_bld, fw_rsv; |
8252 | u32 dev_caps_cfg, hwrm_ver; |
8253 | int rc, len; |
8254 | |
8255 | rc = hwrm_req_init(bp, req, HWRM_VER_GET); |
8256 | if (rc) |
8257 | return rc; |
8258 | |
hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8260 | bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; |
8261 | req->hwrm_intf_maj = HWRM_VERSION_MAJOR; |
8262 | req->hwrm_intf_min = HWRM_VERSION_MINOR; |
8263 | req->hwrm_intf_upd = HWRM_VERSION_UPDATE; |
8264 | |
8265 | resp = hwrm_req_hold(bp, req); |
8266 | rc = hwrm_req_send(bp, req); |
8267 | if (rc) |
8268 | goto hwrm_ver_get_exit; |
8269 | |
8270 | memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); |
8271 | |
8272 | bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | |
8273 | resp->hwrm_intf_min_8b << 8 | |
8274 | resp->hwrm_intf_upd_8b; |
8275 | if (resp->hwrm_intf_maj_8b < 1) { |
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8280 | } |
8281 | |
8282 | hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | |
8283 | HWRM_VERSION_UPDATE; |
8284 | |
8285 | if (bp->hwrm_spec_code > hwrm_ver) |
snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8287 | HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, |
8288 | HWRM_VERSION_UPDATE); |
8289 | else |
snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8291 | resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, |
8292 | resp->hwrm_intf_upd_8b); |
8293 | |
8294 | fw_maj = le16_to_cpu(resp->hwrm_fw_major); |
8295 | if (bp->hwrm_spec_code > 0x10803 && fw_maj) { |
8296 | fw_min = le16_to_cpu(resp->hwrm_fw_minor); |
8297 | fw_bld = le16_to_cpu(resp->hwrm_fw_build); |
8298 | fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); |
8299 | len = FW_VER_STR_LEN; |
8300 | } else { |
8301 | fw_maj = resp->hwrm_fw_maj_8b; |
8302 | fw_min = resp->hwrm_fw_min_8b; |
8303 | fw_bld = resp->hwrm_fw_bld_8b; |
8304 | fw_rsv = resp->hwrm_fw_rsvd_8b; |
8305 | len = BC_HWRM_STR_LEN; |
8306 | } |
8307 | bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); |
snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8309 | fw_rsv); |
8310 | |
8311 | if (strlen(resp->active_pkg_name)) { |
8312 | int fw_ver_len = strlen(bp->fw_ver_str); |
8313 | |
snprintf(bp->fw_ver_str + fw_ver_len,
FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8316 | resp->active_pkg_name); |
8317 | bp->fw_cap |= BNXT_FW_CAP_PKG_VER; |
8318 | } |
8319 | |
8320 | bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); |
8321 | if (!bp->hwrm_cmd_timeout) |
8322 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
8323 | bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; |
8324 | if (!bp->hwrm_cmd_max_timeout) |
8325 | bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; |
8326 | else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) |
netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8328 | bp->hwrm_cmd_max_timeout / 1000); |
8329 | |
8330 | if (resp->hwrm_intf_maj_8b >= 1) { |
8331 | bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); |
8332 | bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); |
8333 | } |
8334 | if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) |
8335 | bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; |
8336 | |
8337 | bp->chip_num = le16_to_cpu(resp->chip_num); |
8338 | bp->chip_rev = resp->chip_rev; |
8339 | if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && |
8340 | !resp->chip_metal) |
8341 | bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; |
8342 | |
8343 | dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); |
8344 | if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && |
8345 | (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) |
8346 | bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; |
8347 | |
8348 | if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) |
8349 | bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; |
8350 | |
8351 | if (dev_caps_cfg & |
8352 | VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) |
8353 | bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; |
8354 | |
8355 | if (dev_caps_cfg & |
8356 | VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) |
8357 | bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; |
8358 | |
8359 | if (dev_caps_cfg & |
8360 | VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) |
8361 | bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; |
8362 | |
8363 | hwrm_ver_get_exit: |
8364 | hwrm_req_drop(bp, req); |
8365 | return rc; |
8366 | } |
8367 | |
8368 | int bnxt_hwrm_fw_set_time(struct bnxt *bp) |
8369 | { |
8370 | struct hwrm_fw_set_time_input *req; |
8371 | struct tm tm; |
8372 | time64_t now = ktime_get_real_seconds(); |
8373 | int rc; |
8374 | |
8375 | if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || |
8376 | bp->hwrm_spec_code < 0x10400) |
8377 | return -EOPNOTSUPP; |
8378 | |
time64_to_tm(now, 0, &tm);
8380 | rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); |
8381 | if (rc) |
8382 | return rc; |
8383 | |
8384 | req->year = cpu_to_le16(1900 + tm.tm_year); |
8385 | req->month = 1 + tm.tm_mon; |
8386 | req->day = tm.tm_mday; |
8387 | req->hour = tm.tm_hour; |
8388 | req->minute = tm.tm_min; |
8389 | req->second = tm.tm_sec; |
8390 | return hwrm_req_send(bp, req); |
8391 | } |
8392 | |
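/* Fold one hardware counter reading into its 64-bit software shadow.
 * Hardware counters may be narrower than 64 bits (width given by
 * @mask); a new reading below the previously folded value means the
 * counter wrapped, so add one full period (mask + 1).  Example with a
 * 16-bit counter (mask = 0xffff): if *sw is 0x1fffe and hw reads 2,
 * the updated *sw becomes 0x20002.
 */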
8393 | static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) |
8394 | { |
8395 | u64 sw_tmp; |
8396 | |
8397 | hw &= mask; |
8398 | sw_tmp = (*sw & ~mask) | hw; |
8399 | if (hw < (*sw & mask)) |
8400 | sw_tmp += mask + 1; |
8401 | WRITE_ONCE(*sw, sw_tmp); |
8402 | } |
8403 | |
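/* Accumulate an array of hardware counters into the software copies.
 * A mask of -1 marks a full-width 64-bit counter that is copied
 * directly; narrower counters go through the rollover logic above.
 * @ignore_zero skips readings of 0 to work around counters that can
 * intermittently read back as 0 (see bnxt_accumulate_all_stats()).
 */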
8404 | static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, |
8405 | int count, bool ignore_zero) |
8406 | { |
8407 | int i; |
8408 | |
8409 | for (i = 0; i < count; i++) { |
8410 | u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); |
8411 | |
8412 | if (ignore_zero && !hw) |
8413 | continue; |
8414 | |
8415 | if (masks[i] == -1ULL) |
8416 | sw_stats[i] = hw; |
8417 | else |
bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8419 | } |
8420 | } |
8421 | |
8422 | static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) |
8423 | { |
8424 | if (!stats->hw_stats) |
8425 | return; |
8426 | |
__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
stats->hw_masks, stats->len / 8, false);
8429 | } |
8430 | |
8431 | static void bnxt_accumulate_all_stats(struct bnxt *bp) |
8432 | { |
8433 | struct bnxt_stats_mem *ring0_stats; |
8434 | bool ignore_zero = false; |
8435 | int i; |
8436 | |
8437 | /* Chip bug. Counter intermittently becomes 0. */ |
8438 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
8439 | ignore_zero = true; |
8440 | |
8441 | for (i = 0; i < bp->cp_nr_rings; i++) { |
8442 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
8443 | struct bnxt_cp_ring_info *cpr; |
8444 | struct bnxt_stats_mem *stats; |
8445 | |
8446 | cpr = &bnapi->cp_ring; |
8447 | stats = &cpr->stats; |
8448 | if (!i) |
8449 | ring0_stats = stats; |
__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
ring0_stats->hw_masks,
ring0_stats->len / 8, ignore_zero);
8453 | } |
8454 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
8455 | struct bnxt_stats_mem *stats = &bp->port_stats; |
8456 | __le64 *hw_stats = stats->hw_stats; |
8457 | u64 *sw_stats = stats->sw_stats; |
8458 | u64 *masks = stats->hw_masks; |
8459 | int cnt; |
8460 | |
8461 | cnt = sizeof(struct rx_port_stats) / 8; |
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8463 | |
8464 | hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
8465 | sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
8466 | masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
8467 | cnt = sizeof(struct tx_port_stats) / 8; |
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8469 | } |
8470 | if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { |
bnxt_accumulate_stats(&bp->rx_port_stats_ext);
bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8473 | } |
8474 | } |
8475 | |
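/* Ask firmware to DMA the port statistics into the driver's buffer.
 * TX stats are placed at BNXT_TX_PORT_STATS_BYTE_OFFSET within the
 * same buffer that holds the RX stats.  A non-zero @flags value (e.g.
 * a counter-mask query) requires the extended stats capability.
 */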
8476 | static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) |
8477 | { |
8478 | struct hwrm_port_qstats_input *req; |
8479 | struct bnxt_pf_info *pf = &bp->pf; |
8480 | int rc; |
8481 | |
8482 | if (!(bp->flags & BNXT_FLAG_PORT_STATS)) |
8483 | return 0; |
8484 | |
8485 | if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
8486 | return -EOPNOTSUPP; |
8487 | |
8488 | rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); |
8489 | if (rc) |
8490 | return rc; |
8491 | |
8492 | req->flags = flags; |
8493 | req->port_id = cpu_to_le16(pf->port_id); |
8494 | req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + |
8495 | BNXT_TX_PORT_STATS_BYTE_OFFSET); |
8496 | req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); |
8497 | return hwrm_req_send(bp, req); |
8498 | } |
8499 | |
8500 | static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) |
8501 | { |
8502 | struct hwrm_queue_pri2cos_qcfg_output *resp_qc; |
8503 | struct hwrm_queue_pri2cos_qcfg_input *req_qc; |
8504 | struct hwrm_port_qstats_ext_output *resp_qs; |
8505 | struct hwrm_port_qstats_ext_input *req_qs; |
8506 | struct bnxt_pf_info *pf = &bp->pf; |
8507 | u32 tx_stat_size; |
8508 | int rc; |
8509 | |
8510 | if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) |
8511 | return 0; |
8512 | |
8513 | if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) |
8514 | return -EOPNOTSUPP; |
8515 | |
8516 | rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); |
8517 | if (rc) |
8518 | return rc; |
8519 | |
8520 | req_qs->flags = flags; |
8521 | req_qs->port_id = cpu_to_le16(pf->port_id); |
8522 | req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); |
8523 | req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); |
8524 | tx_stat_size = bp->tx_port_stats_ext.hw_stats ? |
8525 | sizeof(struct tx_port_stats_ext) : 0; |
8526 | req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); |
8527 | req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); |
resp_qs = hwrm_req_hold(bp, req_qs);
rc = hwrm_req_send(bp, req_qs);
8530 | if (!rc) { |
8531 | bp->fw_rx_stats_ext_size = |
8532 | le16_to_cpu(resp_qs->rx_stat_size) / 8; |
8533 | if (BNXT_FW_MAJ(bp) < 220 && |
8534 | bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) |
8535 | bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; |
8536 | |
8537 | bp->fw_tx_stats_ext_size = tx_stat_size ? |
8538 | le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; |
8539 | } else { |
8540 | bp->fw_rx_stats_ext_size = 0; |
8541 | bp->fw_tx_stats_ext_size = 0; |
8542 | } |
hwrm_req_drop(bp, req_qs);
8544 | |
8545 | if (flags) |
8546 | return rc; |
8547 | |
8548 | if (bp->fw_tx_stats_ext_size <= |
8549 | offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { |
8550 | bp->pri2cos_valid = 0; |
8551 | return rc; |
8552 | } |
8553 | |
8554 | rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); |
8555 | if (rc) |
8556 | return rc; |
8557 | |
8558 | req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); |
8559 | |
resp_qc = hwrm_req_hold(bp, req_qc);
rc = hwrm_req_send(bp, req_qc);
8562 | if (!rc) { |
8563 | u8 *pri2cos; |
8564 | int i, j; |
8565 | |
8566 | pri2cos = &resp_qc->pri0_cos_queue_id; |
8567 | for (i = 0; i < 8; i++) { |
8568 | u8 queue_id = pri2cos[i]; |
8569 | u8 queue_idx; |
8570 | |
8571 | /* Per port queue IDs start from 0, 10, 20, etc */ |
8572 | queue_idx = queue_id % 10; |
8573 | if (queue_idx > BNXT_MAX_QUEUE) { |
8574 | bp->pri2cos_valid = false; |
hwrm_req_drop(bp, req_qc);
8576 | return rc; |
8577 | } |
8578 | for (j = 0; j < bp->max_q; j++) { |
8579 | if (bp->q_ids[j] == queue_id) |
8580 | bp->pri2cos_idx[i] = queue_idx; |
8581 | } |
8582 | } |
8583 | bp->pri2cos_valid = true; |
8584 | } |
hwrm_req_drop(bp, req_qc);
8586 | |
8587 | return rc; |
8588 | } |
8589 | |
8590 | static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) |
8591 | { |
8592 | bnxt_hwrm_tunnel_dst_port_free(bp, |
8593 | TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
8594 | bnxt_hwrm_tunnel_dst_port_free(bp, |
8595 | TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
8596 | } |
8597 | |
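/* Apply or clear the TPA (hardware GRO/LRO aggregation) setting on
 * every vnic.  When clearing, skip the firmware calls entirely if
 * firmware access is currently not possible (e.g. during recovery).
 */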
8598 | static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) |
8599 | { |
8600 | int rc, i; |
8601 | u32 tpa_flags = 0; |
8602 | |
8603 | if (set_tpa) |
8604 | tpa_flags = bp->flags & BNXT_FLAG_TPA; |
8605 | else if (BNXT_NO_FW_ACCESS(bp)) |
8606 | return 0; |
8607 | for (i = 0; i < bp->nr_vnics; i++) { |
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
8612 | return rc; |
8613 | } |
8614 | } |
8615 | return 0; |
8616 | } |
8617 | |
static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8619 | { |
8620 | int i; |
8621 | |
8622 | for (i = 0; i < bp->nr_vnics; i++) |
bnxt_hwrm_vnic_set_rss(bp, i, false);
8624 | } |
8625 | |
8626 | static void bnxt_clear_vnic(struct bnxt *bp) |
8627 | { |
8628 | if (!bp->vnic_info) |
8629 | return; |
8630 | |
8631 | bnxt_hwrm_clear_vnic_filter(bp); |
8632 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { |
/* clear all RSS settings before freeing the vnic contexts */
8634 | bnxt_hwrm_clear_vnic_rss(bp); |
8635 | bnxt_hwrm_vnic_ctx_free(bp); |
8636 | } |
/* before freeing the vnic, undo the vnic's TPA settings */
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, false);
8640 | bnxt_hwrm_vnic_free(bp); |
8641 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
8642 | bnxt_hwrm_vnic_ctx_free(bp); |
8643 | } |
8644 | |
8645 | static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, |
8646 | bool irq_re_init) |
8647 | { |
8648 | bnxt_clear_vnic(bp); |
8649 | bnxt_hwrm_ring_free(bp, close_path); |
8650 | bnxt_hwrm_ring_grp_free(bp); |
8651 | if (irq_re_init) { |
8652 | bnxt_hwrm_stat_ctx_free(bp); |
8653 | bnxt_hwrm_free_tunnel_ports(bp); |
8654 | } |
8655 | } |
8656 | |
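/* Translate the bridge mode requested by the stack (VEB or VEPA) into
 * the corresponding firmware EVB mode and apply it via HWRM_FUNC_CFG.
 * A fid of 0xffff addresses the function issuing the command.
 */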
8657 | static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) |
8658 | { |
8659 | struct hwrm_func_cfg_input *req; |
8660 | u8 evb_mode; |
8661 | int rc; |
8662 | |
8663 | if (br_mode == BRIDGE_MODE_VEB) |
8664 | evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; |
8665 | else if (br_mode == BRIDGE_MODE_VEPA) |
8666 | evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; |
8667 | else |
8668 | return -EINVAL; |
8669 | |
rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
8671 | if (rc) |
8672 | return rc; |
8673 | |
8674 | req->fid = cpu_to_le16(0xffff); |
8675 | req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); |
8676 | req->evb_mode = evb_mode; |
8677 | return hwrm_req_send(bp, req); |
8678 | } |
8679 | |
8680 | static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) |
8681 | { |
8682 | struct hwrm_func_cfg_input *req; |
8683 | int rc; |
8684 | |
8685 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) |
8686 | return 0; |
8687 | |
rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
8689 | if (rc) |
8690 | return rc; |
8691 | |
8692 | req->fid = cpu_to_le16(0xffff); |
8693 | req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); |
8694 | req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; |
8695 | if (size == 128) |
8696 | req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; |
8697 | |
8698 | return hwrm_req_send(bp, req); |
8699 | } |
8700 | |
8701 | static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
8702 | { |
8703 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
8704 | int rc; |
8705 | |
8706 | if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) |
8707 | goto skip_rss_ctx; |
8708 | |
8709 | /* allocate context for vnic */ |
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
8714 | goto vnic_setup_err; |
8715 | } |
8716 | bp->rsscos_nr_ctxs++; |
8717 | |
8718 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
vnic_id, rc);
8723 | goto vnic_setup_err; |
8724 | } |
8725 | bp->rsscos_nr_ctxs++; |
8726 | } |
8727 | |
8728 | : |
8729 | /* configure default vnic, ring grp */ |
8730 | rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); |
8731 | if (rc) { |
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
vnic_id, rc);
8734 | goto vnic_setup_err; |
8735 | } |
8736 | |
8737 | /* Enable RSS hashing on vnic */ |
rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
vnic_id, rc);
8742 | goto vnic_setup_err; |
8743 | } |
8744 | |
8745 | if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
8746 | rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); |
8747 | if (rc) { |
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
vnic_id, rc);
8750 | } |
8751 | } |
8752 | |
8753 | vnic_setup_err: |
8754 | return rc; |
8755 | } |
8756 | |
8757 | static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) |
8758 | { |
8759 | int rc, i, nr_ctxs; |
8760 | |
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
vnic_id, i, rc);
8767 | break; |
8768 | } |
8769 | bp->rsscos_nr_ctxs++; |
8770 | } |
8771 | if (i < nr_ctxs) |
8772 | return -ENOMEM; |
8773 | |
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
vnic_id, rc);
8778 | return rc; |
8779 | } |
8780 | rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); |
8781 | if (rc) { |
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
vnic_id, rc);
8784 | return rc; |
8785 | } |
8786 | if (bp->flags & BNXT_FLAG_AGG_RINGS) { |
8787 | rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); |
8788 | if (rc) { |
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
vnic_id, rc);
8791 | } |
8792 | } |
8793 | return rc; |
8794 | } |
8795 | |
8796 | static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) |
8797 | { |
8798 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
8799 | return __bnxt_setup_vnic_p5(bp, vnic_id); |
8800 | else |
8801 | return __bnxt_setup_vnic(bp, vnic_id); |
8802 | } |
8803 | |
8804 | static int bnxt_alloc_rfs_vnics(struct bnxt *bp) |
8805 | { |
8806 | #ifdef CONFIG_RFS_ACCEL |
8807 | int i, rc = 0; |
8808 | |
8809 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
8810 | return 0; |
8811 | |
8812 | for (i = 0; i < bp->rx_nr_rings; i++) { |
8813 | struct bnxt_vnic_info *vnic; |
8814 | u16 vnic_id = i + 1; |
8815 | u16 ring_id = i; |
8816 | |
8817 | if (vnic_id >= bp->nr_vnics) |
8818 | break; |
8819 | |
8820 | vnic = &bp->vnic_info[vnic_id]; |
8821 | vnic->flags |= BNXT_VNIC_RFS_FLAG; |
8822 | if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) |
8823 | vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; |
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
8828 | break; |
8829 | } |
8830 | rc = bnxt_setup_vnic(bp, vnic_id); |
8831 | if (rc) |
8832 | break; |
8833 | } |
8834 | return rc; |
8835 | #else |
8836 | return 0; |
8837 | #endif |
8838 | } |
8839 | |
8840 | /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ |
8841 | static bool bnxt_promisc_ok(struct bnxt *bp) |
8842 | { |
8843 | #ifdef CONFIG_BNXT_SRIOV |
if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8845 | return false; |
8846 | #endif |
8847 | return true; |
8848 | } |
8849 | |
8850 | static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) |
8851 | { |
8852 | unsigned int rc = 0; |
8853 | |
rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
8858 | return rc; |
8859 | } |
8860 | |
rc = bnxt_hwrm_vnic_cfg(bp, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
8865 | return rc; |
8866 | } |
8867 | return rc; |
8868 | } |
8869 | |
8870 | static int bnxt_cfg_rx_mode(struct bnxt *); |
8871 | static bool bnxt_mc_list_updated(struct bnxt *, u32 *); |
8872 | |
8873 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) |
8874 | { |
8875 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
8876 | int rc = 0; |
8877 | unsigned int rx_nr_rings = bp->rx_nr_rings; |
8878 | |
8879 | if (irq_re_init) { |
8880 | rc = bnxt_hwrm_stat_ctx_alloc(bp); |
8881 | if (rc) { |
netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
rc);
8884 | goto err_out; |
8885 | } |
8886 | } |
8887 | |
8888 | rc = bnxt_hwrm_ring_alloc(bp); |
8889 | if (rc) { |
netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8891 | goto err_out; |
8892 | } |
8893 | |
8894 | rc = bnxt_hwrm_ring_grp_alloc(bp); |
8895 | if (rc) { |
netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8897 | goto err_out; |
8898 | } |
8899 | |
8900 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
8901 | rx_nr_rings--; |
8902 | |
8903 | /* default vnic 0 */ |
rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8907 | goto err_out; |
8908 | } |
8909 | |
8910 | if (BNXT_VF(bp)) |
8911 | bnxt_hwrm_func_qcfg(bp); |
8912 | |
rc = bnxt_setup_vnic(bp, 0);
8914 | if (rc) |
8915 | goto err_out; |
8916 | if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) |
8917 | bnxt_hwrm_update_rss_hash_cfg(bp); |
8918 | |
8919 | if (bp->flags & BNXT_FLAG_RFS) { |
8920 | rc = bnxt_alloc_rfs_vnics(bp); |
8921 | if (rc) |
8922 | goto err_out; |
8923 | } |
8924 | |
8925 | if (bp->flags & BNXT_FLAG_TPA) { |
8926 | rc = bnxt_set_tpa(bp, set_tpa: true); |
8927 | if (rc) |
8928 | goto err_out; |
8929 | } |
8930 | |
8931 | if (BNXT_VF(bp)) |
8932 | bnxt_update_vf_mac(bp); |
8933 | |
8934 | /* Filter for default vnic 0 */ |
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV)
netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
else
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8941 | goto err_out; |
8942 | } |
8943 | vnic->uc_filter_count = 1; |
8944 | |
8945 | vnic->rx_mask = 0; |
8946 | if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) |
8947 | goto skip_rx_mask; |
8948 | |
8949 | if (bp->dev->flags & IFF_BROADCAST) |
8950 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; |
8951 | |
8952 | if (bp->dev->flags & IFF_PROMISC) |
8953 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
8954 | |
8955 | if (bp->dev->flags & IFF_ALLMULTI) { |
8956 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
8957 | vnic->mc_list_count = 0; |
8958 | } else if (bp->dev->flags & IFF_MULTICAST) { |
8959 | u32 mask = 0; |
8960 | |
8961 | bnxt_mc_list_updated(bp, &mask); |
8962 | vnic->rx_mask |= mask; |
8963 | } |
8964 | |
8965 | rc = bnxt_cfg_rx_mode(bp); |
8966 | if (rc) |
8967 | goto err_out; |
8968 | |
8969 | skip_rx_mask: |
8970 | rc = bnxt_hwrm_set_coal(bp); |
8971 | if (rc) |
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8973 | rc); |
8974 | |
8975 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
8976 | rc = bnxt_setup_nitroa0_vnic(bp); |
8977 | if (rc) |
netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8979 | rc); |
8980 | } |
8981 | |
8982 | if (BNXT_VF(bp)) { |
8983 | bnxt_hwrm_func_qcfg(bp); |
netdev_update_features(bp->dev);
8985 | } |
8986 | |
8987 | return 0; |
8988 | |
8989 | err_out: |
bnxt_hwrm_resource_free(bp, 0, true);
8991 | |
8992 | return rc; |
8993 | } |
8994 | |
8995 | static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) |
8996 | { |
bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8998 | return 0; |
8999 | } |
9000 | |
9001 | static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) |
9002 | { |
9003 | bnxt_init_cp_rings(bp); |
9004 | bnxt_init_rx_rings(bp); |
9005 | bnxt_init_tx_rings(bp); |
9006 | bnxt_init_ring_grps(bp, irq_re_init); |
9007 | bnxt_init_vnics(bp); |
9008 | |
9009 | return bnxt_init_chip(bp, irq_re_init); |
9010 | } |
9011 | |
9012 | static int bnxt_set_real_num_queues(struct bnxt *bp) |
9013 | { |
9014 | int rc; |
9015 | struct net_device *dev = bp->dev; |
9016 | |
rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
bp->tx_nr_rings_xdp);
9019 | if (rc) |
9020 | return rc; |
9021 | |
rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
9023 | if (rc) |
9024 | return rc; |
9025 | |
9026 | #ifdef CONFIG_RFS_ACCEL |
9027 | if (bp->flags & BNXT_FLAG_RFS) |
dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
9029 | #endif |
9030 | |
9031 | return rc; |
9032 | } |
9033 | |
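/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared completion rings, RX and TX are each capped at @max;
 * otherwise the larger count is trimmed one ring at a time until
 * rx + tx fits.
 */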
9034 | static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, |
9035 | bool shared) |
9036 | { |
9037 | int _rx = *rx, _tx = *tx; |
9038 | |
9039 | if (shared) { |
9040 | *rx = min_t(int, _rx, max); |
9041 | *tx = min_t(int, _tx, max); |
9042 | } else { |
9043 | if (max < 2) |
9044 | return -ENOMEM; |
9045 | |
9046 | while (_rx + _tx > max) { |
9047 | if (_rx > _tx && _rx > 1) |
9048 | _rx--; |
9049 | else if (_tx > 1) |
9050 | _tx--; |
9051 | } |
9052 | *rx = _rx; |
9053 | *tx = _tx; |
9054 | } |
9055 | return 0; |
9056 | } |
9057 | |
9058 | static void bnxt_setup_msix(struct bnxt *bp) |
9059 | { |
9060 | const int len = sizeof(bp->irq_tbl[0].name); |
9061 | struct net_device *dev = bp->dev; |
9062 | int tcs, i; |
9063 | |
9064 | tcs = netdev_get_num_tc(dev); |
9065 | if (tcs) { |
9066 | int i, off, count; |
9067 | |
9068 | for (i = 0; i < tcs; i++) { |
9069 | count = bp->tx_nr_rings_per_tc; |
9070 | off = i * count; |
netdev_set_tc_queue(dev, i, count, off);
9072 | } |
9073 | } |
9074 | |
9075 | for (i = 0; i < bp->cp_nr_rings; i++) { |
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9077 | char *attr; |
9078 | |
9079 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
attr = "TxRx";
else if (i < bp->rx_nr_rings)
attr = "rx";
else
attr = "tx";

snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
attr, i);
9088 | bp->irq_tbl[map_idx].handler = bnxt_msix; |
9089 | } |
9090 | } |
9091 | |
9092 | static void bnxt_setup_inta(struct bnxt *bp) |
9093 | { |
9094 | const int len = sizeof(bp->irq_tbl[0].name); |
9095 | |
if (netdev_get_num_tc(bp->dev))
netdev_reset_tc(bp->dev);

snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
9101 | bp->irq_tbl[0].handler = bnxt_inta; |
9102 | } |
9103 | |
9104 | static int bnxt_init_int_mode(struct bnxt *bp); |
9105 | |
9106 | static int bnxt_setup_int_mode(struct bnxt *bp) |
9107 | { |
9108 | int rc; |
9109 | |
9110 | if (!bp->irq_tbl) { |
9111 | rc = bnxt_init_int_mode(bp); |
9112 | if (rc || !bp->irq_tbl) |
9113 | return rc ?: -ENODEV; |
9114 | } |
9115 | |
9116 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
9117 | bnxt_setup_msix(bp); |
9118 | else |
9119 | bnxt_setup_inta(bp); |
9120 | |
9121 | rc = bnxt_set_real_num_queues(bp); |
9122 | return rc; |
9123 | } |
9124 | |
9125 | #ifdef CONFIG_RFS_ACCEL |
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9127 | { |
9128 | return bp->hw_resc.max_rsscos_ctxs; |
9129 | } |
9130 | |
9131 | static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) |
9132 | { |
9133 | return bp->hw_resc.max_vnics; |
9134 | } |
9135 | #endif |
9136 | |
9137 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) |
9138 | { |
9139 | return bp->hw_resc.max_stat_ctxs; |
9140 | } |
9141 | |
9142 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) |
9143 | { |
9144 | return bp->hw_resc.max_cp_rings; |
9145 | } |
9146 | |
9147 | static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
9148 | { |
9149 | unsigned int cp = bp->hw_resc.max_cp_rings; |
9150 | |
9151 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
9152 | cp -= bnxt_get_ulp_msix_num(bp); |
9153 | |
9154 | return cp; |
9155 | } |
9156 | |
9157 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
9158 | { |
9159 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
9160 | |
9161 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
9162 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); |
9163 | |
9164 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); |
9165 | } |
9166 | |
9167 | static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) |
9168 | { |
9169 | bp->hw_resc.max_irqs = max_irqs; |
9170 | } |
9171 | |
9172 | unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) |
9173 | { |
9174 | unsigned int cp; |
9175 | |
9176 | cp = bnxt_get_max_func_cp_rings_for_en(bp); |
9177 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
9178 | return cp - bp->rx_nr_rings - bp->tx_nr_rings; |
9179 | else |
9180 | return cp - bp->cp_nr_rings; |
9181 | } |
9182 | |
9183 | unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) |
9184 | { |
9185 | return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); |
9186 | } |
9187 | |
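/* Return how many MSI-X vectors beyond those used by the rings can be
 * made available, e.g. for the ULP (RDMA) driver.  Without the new
 * resource manager only vectors already allocated count; with it,
 * firmware may grant vectors up to the function's IRQ maximum.
 */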
9188 | int bnxt_get_avail_msix(struct bnxt *bp, int num) |
9189 | { |
9190 | int max_cp = bnxt_get_max_func_cp_rings(bp); |
9191 | int max_irq = bnxt_get_max_func_irqs(bp); |
9192 | int total_req = bp->cp_nr_rings + num; |
9193 | int max_idx, avail_msix; |
9194 | |
9195 | max_idx = bp->total_irqs; |
9196 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
9197 | max_idx = min_t(int, bp->total_irqs, max_cp); |
9198 | avail_msix = max_idx - bp->cp_nr_rings; |
9199 | if (!BNXT_NEW_RM(bp) || avail_msix >= num) |
9200 | return avail_msix; |
9201 | |
9202 | if (max_irq < total_req) { |
9203 | num = max_irq - bp->cp_nr_rings; |
9204 | if (num <= 0) |
9205 | return 0; |
9206 | } |
9207 | return num; |
9208 | } |
9209 | |
9210 | static int bnxt_get_num_msix(struct bnxt *bp) |
9211 | { |
9212 | if (!BNXT_NEW_RM(bp)) |
9213 | return bnxt_get_max_func_irqs(bp); |
9214 | |
9215 | return bnxt_nq_rings_in_use(bp); |
9216 | } |
9217 | |
9218 | static int bnxt_init_msix(struct bnxt *bp) |
9219 | { |
9220 | int i, total_vecs, max, rc = 0, min = 1, ulp_msix; |
9221 | struct msix_entry *msix_ent; |
9222 | |
9223 | total_vecs = bnxt_get_num_msix(bp); |
9224 | max = bnxt_get_max_func_irqs(bp); |
9225 | if (total_vecs > max) |
9226 | total_vecs = max; |
9227 | |
9228 | if (!total_vecs) |
9229 | return 0; |
9230 | |
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9232 | if (!msix_ent) |
9233 | return -ENOMEM; |
9234 | |
9235 | for (i = 0; i < total_vecs; i++) { |
9236 | msix_ent[i].entry = i; |
9237 | msix_ent[i].vector = 0; |
9238 | } |
9239 | |
9240 | if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) |
9241 | min = 2; |
9242 | |
total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9244 | ulp_msix = bnxt_get_ulp_msix_num(bp); |
9245 | if (total_vecs < 0 || total_vecs < ulp_msix) { |
9246 | rc = -ENODEV; |
9247 | goto msix_setup_exit; |
9248 | } |
9249 | |
bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9251 | if (bp->irq_tbl) { |
9252 | for (i = 0; i < total_vecs; i++) |
9253 | bp->irq_tbl[i].vector = msix_ent[i].vector; |
9254 | |
9255 | bp->total_irqs = total_vecs; |
9256 | /* Trim rings based upon num of vectors allocated */ |
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
total_vecs - ulp_msix, min == 1);
9259 | if (rc) |
9260 | goto msix_setup_exit; |
9261 | |
9262 | bp->cp_nr_rings = (min == 1) ? |
9263 | max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
9264 | bp->tx_nr_rings + bp->rx_nr_rings; |
9265 | |
9266 | } else { |
9267 | rc = -ENOMEM; |
9268 | goto msix_setup_exit; |
9269 | } |
9270 | bp->flags |= BNXT_FLAG_USING_MSIX; |
kfree(msix_ent);
9272 | return 0; |
9273 | |
9274 | msix_setup_exit: |
netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
pci_disable_msix(bp->pdev);
kfree(msix_ent);
9280 | return rc; |
9281 | } |
9282 | |
9283 | static int bnxt_init_inta(struct bnxt *bp) |
9284 | { |
bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9286 | if (!bp->irq_tbl) |
9287 | return -ENOMEM; |
9288 | |
9289 | bp->total_irqs = 1; |
9290 | bp->rx_nr_rings = 1; |
9291 | bp->tx_nr_rings = 1; |
9292 | bp->cp_nr_rings = 1; |
9293 | bp->flags |= BNXT_FLAG_SHARED_RINGS; |
9294 | bp->irq_tbl[0].vector = bp->pdev->irq; |
9295 | return 0; |
9296 | } |
9297 | |
9298 | static int bnxt_init_int_mode(struct bnxt *bp) |
9299 | { |
9300 | int rc = -ENODEV; |
9301 | |
9302 | if (bp->flags & BNXT_FLAG_MSIX_CAP) |
9303 | rc = bnxt_init_msix(bp); |
9304 | |
9305 | if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { |
9306 | /* fallback to INTA */ |
9307 | rc = bnxt_init_inta(bp); |
9308 | } |
9309 | return rc; |
9310 | } |
9311 | |
9312 | static void bnxt_clear_int_mode(struct bnxt *bp) |
9313 | { |
9314 | if (bp->flags & BNXT_FLAG_USING_MSIX) |
pci_disable_msix(bp->pdev);

kfree(bp->irq_tbl);
9318 | bp->irq_tbl = NULL; |
9319 | bp->flags &= ~BNXT_FLAG_USING_MSIX; |
9320 | } |
9321 | |
9322 | int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) |
9323 | { |
int tcs = netdev_get_num_tc(bp->dev);
9325 | bool irq_cleared = false; |
9326 | int rc; |
9327 | |
9328 | if (!bnxt_need_reserve_rings(bp)) |
9329 | return 0; |
9330 | |
9331 | if (irq_re_init && BNXT_NEW_RM(bp) && |
9332 | bnxt_get_num_msix(bp) != bp->total_irqs) { |
9333 | bnxt_ulp_irq_stop(bp); |
9334 | bnxt_clear_int_mode(bp); |
9335 | irq_cleared = true; |
9336 | } |
9337 | rc = __bnxt_reserve_rings(bp); |
9338 | if (irq_cleared) { |
9339 | if (!rc) |
9340 | rc = bnxt_init_int_mode(bp); |
bnxt_ulp_irq_restart(bp, rc);
9342 | } |
9343 | if (rc) { |
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9345 | return rc; |
9346 | } |
9347 | if (tcs && (bp->tx_nr_rings_per_tc * tcs != |
9348 | bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { |
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
9351 | if (bp->tx_nr_rings_xdp) |
9352 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; |
9353 | else |
9354 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
9355 | return -ENOMEM; |
9356 | } |
9357 | return 0; |
9358 | } |
9359 | |
9360 | static void bnxt_free_irq(struct bnxt *bp) |
9361 | { |
9362 | struct bnxt_irq *irq; |
9363 | int i; |
9364 | |
9365 | #ifdef CONFIG_RFS_ACCEL |
free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9367 | bp->dev->rx_cpu_rmap = NULL; |
9368 | #endif |
9369 | if (!bp->irq_tbl || !bp->bnapi) |
9370 | return; |
9371 | |
9372 | for (i = 0; i < bp->cp_nr_rings; i++) { |
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9374 | |
9375 | irq = &bp->irq_tbl[map_idx]; |
9376 | if (irq->requested) { |
9377 | if (irq->have_cpumask) { |
irq_set_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
9380 | irq->have_cpumask = 0; |
9381 | } |
9382 | free_irq(irq->vector, bp->bnapi[i]); |
9383 | } |
9384 | |
9385 | irq->requested = 0; |
9386 | } |
9387 | } |
9388 | |
9389 | static int bnxt_request_irq(struct bnxt *bp) |
9390 | { |
9391 | int i, j, rc = 0; |
9392 | unsigned long flags = 0; |
9393 | #ifdef CONFIG_RFS_ACCEL |
9394 | struct cpu_rmap *rmap; |
9395 | #endif |
9396 | |
9397 | rc = bnxt_setup_int_mode(bp); |
9398 | if (rc) { |
netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", rc);
9401 | return rc; |
9402 | } |
9403 | #ifdef CONFIG_RFS_ACCEL |
9404 | rmap = bp->dev->rx_cpu_rmap; |
9405 | #endif |
9406 | if (!(bp->flags & BNXT_FLAG_USING_MSIX)) |
9407 | flags = IRQF_SHARED; |
9408 | |
9409 | for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { |
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9411 | struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; |
9412 | |
9413 | #ifdef CONFIG_RFS_ACCEL |
9414 | if (rmap && bp->bnapi[i]->rx_ring) { |
rc = irq_cpu_rmap_add(rmap, irq->vector);
if (rc)
netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
j);
9419 | j++; |
9420 | } |
9421 | #endif |
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
9424 | if (rc) |
9425 | break; |
9426 | |
9427 | irq->requested = 1; |
9428 | |
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);

irq->have_cpumask = 1;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
"Set affinity failed, IRQ = %d\n",
irq->vector);
9440 | break; |
9441 | } |
9442 | } |
9443 | } |
9444 | return rc; |
9445 | } |
9446 | |
9447 | static void bnxt_del_napi(struct bnxt *bp) |
9448 | { |
9449 | int i; |
9450 | |
9451 | if (!bp->bnapi) |
9452 | return; |
9453 | |
9454 | for (i = 0; i < bp->cp_nr_rings; i++) { |
9455 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
9456 | |
__netif_napi_del(&bnapi->napi);
9458 | } |
9459 | /* We called __netif_napi_del(), we need |
9460 | * to respect an RCU grace period before freeing napi structures. |
9461 | */ |
9462 | synchronize_net(); |
9463 | } |
9464 | |
9465 | static void bnxt_init_napi(struct bnxt *bp) |
9466 | { |
9467 | int i; |
9468 | unsigned int cp_nr_rings = bp->cp_nr_rings; |
9469 | struct bnxt_napi *bnapi; |
9470 | |
9471 | if (bp->flags & BNXT_FLAG_USING_MSIX) { |
9472 | int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; |
9473 | |
9474 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
9475 | poll_fn = bnxt_poll_p5; |
9476 | else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) |
9477 | cp_nr_rings--; |
9478 | for (i = 0; i < cp_nr_rings; i++) { |
9479 | bnapi = bp->bnapi[i]; |
netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
9481 | } |
9482 | if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { |
9483 | bnapi = bp->bnapi[cp_nr_rings]; |
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll_nitroa0);
9486 | } |
9487 | } else { |
9488 | bnapi = bp->bnapi[0]; |
netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
9490 | } |
9491 | } |
9492 | |
9493 | static void bnxt_disable_napi(struct bnxt *bp) |
9494 | { |
9495 | int i; |
9496 | |
9497 | if (!bp->bnapi || |
test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9499 | return; |
9500 | |
9501 | for (i = 0; i < bp->cp_nr_rings; i++) { |
9502 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
9503 | struct bnxt_cp_ring_info *cpr; |
9504 | |
9505 | cpr = &bnapi->cp_ring; |
9506 | if (bnapi->tx_fault) |
9507 | cpr->sw_stats.tx.tx_resets++; |
9508 | if (bnapi->in_reset) |
9509 | cpr->sw_stats.rx.rx_resets++; |
napi_disable(&bnapi->napi);
if (bnapi->rx_ring)
cancel_work_sync(&cpr->dim.work);
9513 | } |
9514 | } |
9515 | |
9516 | static void bnxt_enable_napi(struct bnxt *bp) |
9517 | { |
9518 | int i; |
9519 | |
clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9521 | for (i = 0; i < bp->cp_nr_rings; i++) { |
9522 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
9523 | struct bnxt_cp_ring_info *cpr; |
9524 | |
9525 | bnapi->tx_fault = 0; |
9526 | |
9527 | cpr = &bnapi->cp_ring; |
9528 | bnapi->in_reset = false; |
9529 | |
9530 | bnapi->tx_pkts = 0; |
9531 | |
9532 | if (bnapi->rx_ring) { |
9533 | INIT_WORK(&cpr->dim.work, bnxt_dim_work); |
9534 | cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
9535 | } |
napi_enable(&bnapi->napi);
9537 | } |
9538 | } |
9539 | |
9540 | void bnxt_tx_disable(struct bnxt *bp) |
9541 | { |
9542 | int i; |
9543 | struct bnxt_tx_ring_info *txr; |
9544 | |
9545 | if (bp->tx_ring) { |
9546 | for (i = 0; i < bp->tx_nr_rings; i++) { |
9547 | txr = &bp->tx_ring[i]; |
9548 | WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); |
9549 | } |
9550 | } |
9551 | /* Make sure napi polls see @dev_state change */ |
9552 | synchronize_net(); |
9553 | /* Drop carrier first to prevent TX timeout */ |
netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
9557 | } |
9558 | |
9559 | void bnxt_tx_enable(struct bnxt *bp) |
9560 | { |
9561 | int i; |
9562 | struct bnxt_tx_ring_info *txr; |
9563 | |
9564 | for (i = 0; i < bp->tx_nr_rings; i++) { |
9565 | txr = &bp->tx_ring[i]; |
9566 | WRITE_ONCE(txr->dev_state, 0); |
9567 | } |
9568 | /* Make sure napi polls see @dev_state change */ |
9569 | synchronize_net(); |
netif_tx_wake_all_queues(bp->dev);
if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
9573 | } |
9574 | |
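/* Map the active FEC mode reported by firmware to a human readable
 * string for the link-up message in bnxt_report_link().
 */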
9575 | static char *bnxt_report_fec(struct bnxt_link_info *link_info) |
9576 | { |
9577 | u8 active_fec = link_info->active_fec_sig_mode & |
9578 | PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; |
9579 | |
9580 | switch (active_fec) { |
9581 | default: |
9582 | case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: |
return "None";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
return "Clause 74 BaseR";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
return "Clause 91 RS(528,514)";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
return "Clause 91 RS544_1XN";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
return "Clause 91 RS(544,514)";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
return "Clause 91 RS272_1XN";
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
return "Clause 91 RS(272,257)";
9596 | } |
9597 | } |
9598 | |
9599 | void bnxt_report_link(struct bnxt *bp) |
9600 | { |
9601 | if (BNXT_LINK_IS_UP(bp)) { |
const char *signal = "";
9603 | const char *flow_ctrl; |
9604 | const char *duplex; |
9605 | u32 speed; |
9606 | u16 fec; |
9607 | |
netif_carrier_on(bp->dev);
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
if (speed == SPEED_UNKNOWN) {
netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9612 | return; |
9613 | } |
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
duplex = "full";
else
duplex = "half";
if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
flow_ctrl = "ON - receive & transmit";
else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
flow_ctrl = "ON - transmit";
else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
9626 | if (bp->link_info.phy_qcfg_resp.option_flags & |
9627 | PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { |
9628 | u8 sig_mode = bp->link_info.active_fec_sig_mode & |
9629 | PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; |
9630 | switch (sig_mode) { |
9631 | case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: |
signal = "(NRZ) ";
9633 | break; |
9634 | case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: |
signal = "(PAM4) ";
9636 | break; |
9637 | default: |
9638 | break; |
9639 | } |
9640 | } |
netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
speed, signal, duplex, flow_ctrl);
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
fec = bp->link_info.fec_cfg;
if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
bnxt_report_fec(&bp->link_info));
9652 | } else { |
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
9655 | } |
9656 | } |
9657 | |
9658 | static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) |
9659 | { |
9660 | if (!resp->supported_speeds_auto_mode && |
9661 | !resp->supported_speeds_force_mode && |
9662 | !resp->supported_pam4_speeds_auto_mode && |
9663 | !resp->supported_pam4_speeds_force_mode) |
9664 | return true; |
9665 | return false; |
9666 | } |
9667 | |
9668 | static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) |
9669 | { |
9670 | struct bnxt_link_info *link_info = &bp->link_info; |
9671 | struct hwrm_port_phy_qcaps_output *resp; |
9672 | struct hwrm_port_phy_qcaps_input *req; |
9673 | int rc = 0; |
9674 | |
9675 | if (bp->hwrm_spec_code < 0x10201) |
9676 | return 0; |
9677 | |
9678 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); |
9679 | if (rc) |
9680 | return rc; |
9681 | |
9682 | resp = hwrm_req_hold(bp, req); |
9683 | rc = hwrm_req_send(bp, req); |
9684 | if (rc) |
9685 | goto hwrm_phy_qcaps_exit; |
9686 | |
9687 | bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); |
9688 | if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { |
9689 | struct ethtool_eee *eee = &bp->eee; |
9690 | u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); |
9691 | |
9692 | eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); |
9693 | bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & |
9694 | PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; |
9695 | bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & |
9696 | PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; |
9697 | } |
9698 | |
9699 | if (bp->hwrm_spec_code >= 0x10a01) { |
9700 | if (bnxt_phy_qcaps_no_speed(resp)) { |
9701 | link_info->phy_state = BNXT_PHY_STATE_DISABLED; |
netdev_warn(bp->dev, "Ethernet link disabled\n");
9703 | } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { |
9704 | link_info->phy_state = BNXT_PHY_STATE_ENABLED; |
netdev_info(bp->dev, "Ethernet link enabled\n");
9706 | /* Phy re-enabled, reprobe the speeds */ |
9707 | link_info->support_auto_speeds = 0; |
9708 | link_info->support_pam4_auto_speeds = 0; |
9709 | } |
9710 | } |
9711 | if (resp->supported_speeds_auto_mode) |
9712 | link_info->support_auto_speeds = |
9713 | le16_to_cpu(resp->supported_speeds_auto_mode); |
9714 | if (resp->supported_pam4_speeds_auto_mode) |
9715 | link_info->support_pam4_auto_speeds = |
9716 | le16_to_cpu(resp->supported_pam4_speeds_auto_mode); |
9717 | |
9718 | bp->port_count = resp->port_cnt; |
9719 | |
9720 | hwrm_phy_qcaps_exit: |
9721 | hwrm_req_drop(bp, req); |
9722 | return rc; |
9723 | } |
9724 | |
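/* Return true if @advertising contains any bits absent from
 * @supported.  The XOR isolates the differing bits; OR-ing them back
 * into @supported changes the value only if an advertised bit is
 * unsupported.
 */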
9725 | static bool bnxt_support_dropped(u16 advertising, u16 supported) |
9726 | { |
9727 | u16 diff = advertising ^ supported; |
9728 | |
9729 | return ((supported | diff) != supported); |
9730 | } |
9731 | |
9732 | static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) |
9733 | { |
9734 | /* Check if any advertised speeds are no longer supported. The caller |
9735 | * holds the link_lock mutex, so we can modify link_info settings. |
9736 | */ |
if (bnxt_support_dropped(link_info->advertising,
link_info->support_auto_speeds)) {
9739 | link_info->advertising = link_info->support_auto_speeds; |
9740 | return true; |
9741 | } |
if (bnxt_support_dropped(link_info->advertising_pam4,
link_info->support_pam4_auto_speeds)) {
9744 | link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; |
9745 | return true; |
9746 | } |
9747 | return false; |
9748 | } |
9749 | |
9750 | int bnxt_update_link(struct bnxt *bp, bool chng_link_state) |
9751 | { |
9752 | struct bnxt_link_info *link_info = &bp->link_info; |
9753 | struct hwrm_port_phy_qcfg_output *resp; |
9754 | struct hwrm_port_phy_qcfg_input *req; |
9755 | u8 link_state = link_info->link_state; |
9756 | bool support_changed; |
9757 | int rc; |
9758 | |
9759 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); |
9760 | if (rc) |
9761 | return rc; |
9762 | |
9763 | resp = hwrm_req_hold(bp, req); |
9764 | rc = hwrm_req_send(bp, req); |
9765 | if (rc) { |
9766 | hwrm_req_drop(bp, req); |
9767 | if (BNXT_VF(bp) && rc == -ENODEV) { |
netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9769 | rc = 0; |
9770 | } |
9771 | return rc; |
9772 | } |
9773 | |
9774 | memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); |
9775 | link_info->phy_link_status = resp->link; |
9776 | link_info->duplex = resp->duplex_cfg; |
9777 | if (bp->hwrm_spec_code >= 0x10800) |
9778 | link_info->duplex = resp->duplex_state; |
9779 | link_info->pause = resp->pause; |
9780 | link_info->auto_mode = resp->auto_mode; |
9781 | link_info->auto_pause_setting = resp->auto_pause; |
9782 | link_info->lp_pause = resp->link_partner_adv_pause; |
9783 | link_info->force_pause_setting = resp->force_pause; |
9784 | link_info->duplex_setting = resp->duplex_cfg; |
9785 | if (link_info->phy_link_status == BNXT_LINK_LINK) |
9786 | link_info->link_speed = le16_to_cpu(resp->link_speed); |
9787 | else |
9788 | link_info->link_speed = 0; |
9789 | link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); |
9790 | link_info->force_pam4_link_speed = |
9791 | le16_to_cpu(resp->force_pam4_link_speed); |
9792 | link_info->support_speeds = le16_to_cpu(resp->support_speeds); |
9793 | link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); |
9794 | link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); |
9795 | link_info->auto_pam4_link_speeds = |
9796 | le16_to_cpu(resp->auto_pam4_link_speed_mask); |
9797 | link_info->lp_auto_link_speeds = |
9798 | le16_to_cpu(resp->link_partner_adv_speeds); |
9799 | link_info->lp_auto_pam4_link_speeds = |
9800 | resp->link_partner_pam4_adv_speeds; |
9801 | link_info->preemphasis = le32_to_cpu(resp->preemphasis); |
9802 | link_info->phy_ver[0] = resp->phy_maj; |
9803 | link_info->phy_ver[1] = resp->phy_min; |
9804 | link_info->phy_ver[2] = resp->phy_bld; |
9805 | link_info->media_type = resp->media_type; |
9806 | link_info->phy_type = resp->phy_type; |
9807 | link_info->transceiver = resp->xcvr_pkg_type; |
9808 | link_info->phy_addr = resp->eee_config_phy_addr & |
9809 | PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; |
9810 | link_info->module_status = resp->module_status; |
9811 | |
9812 | if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { |
9813 | struct ethtool_eee *eee = &bp->eee; |
9814 | u16 fw_speeds; |
9815 | |
9816 | eee->eee_active = 0; |
9817 | if (resp->eee_config_phy_addr & |
9818 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { |
9819 | eee->eee_active = 1; |
9820 | fw_speeds = le16_to_cpu( |
9821 | resp->link_partner_adv_eee_link_speed_mask); |
9822 | eee->lp_advertised = |
9823 | _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); |
9824 | } |
9825 | |
9826 | /* Pull initial EEE config */ |
9827 | if (!chng_link_state) { |
9828 | if (resp->eee_config_phy_addr & |
9829 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) |
9830 | eee->eee_enabled = 1; |
9831 | |
9832 | fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); |
9833 | eee->advertised = |
9834 | _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); |
9835 | |
9836 | if (resp->eee_config_phy_addr & |
9837 | PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { |
9838 | __le32 tmr; |
9839 | |
9840 | eee->tx_lpi_enabled = 1; |
9841 | tmr = resp->xcvr_identifier_type_tx_lpi_timer; |
9842 | eee->tx_lpi_timer = le32_to_cpu(tmr) & |
9843 | PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; |
9844 | } |
9845 | } |
9846 | } |
9847 | |
9848 | link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; |
9849 | if (bp->hwrm_spec_code >= 0x10504) { |
9850 | link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); |
9851 | link_info->active_fec_sig_mode = resp->active_fec_signal_mode; |
9852 | } |
9853 | /* TODO: need to add more logic to report VF link */ |
9854 | if (chng_link_state) { |
9855 | if (link_info->phy_link_status == BNXT_LINK_LINK) |
9856 | link_info->link_state = BNXT_LINK_STATE_UP; |
9857 | else |
9858 | link_info->link_state = BNXT_LINK_STATE_DOWN; |
9859 | if (link_state != link_info->link_state) |
9860 | bnxt_report_link(bp); |
9861 | } else { |
		/* always report link down if not asked to update link state */
9863 | link_info->link_state = BNXT_LINK_STATE_DOWN; |
9864 | } |
9865 | hwrm_req_drop(bp, req); |
9866 | |
9867 | if (!BNXT_PHY_CFG_ABLE(bp)) |
9868 | return 0; |
9869 | |
9870 | support_changed = bnxt_support_speed_dropped(link_info); |
9871 | if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) |
9872 | bnxt_hwrm_set_link_setting(bp, true, false); |
9873 | return 0; |
9874 | } |
9875 | |
9876 | static void bnxt_get_port_module_status(struct bnxt *bp) |
9877 | { |
9878 | struct bnxt_link_info *link_info = &bp->link_info; |
9879 | struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; |
9880 | u8 module_status; |
9881 | |
	if (bnxt_update_link(bp, true))
9883 | return; |
9884 | |
9885 | module_status = link_info->module_status; |
9886 | switch (module_status) { |
9887 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: |
9888 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: |
9889 | case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: |
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9900 | } |
9901 | } |
9902 | |
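/* Fill in the pause portion of a PORT_PHY_CFG request, using the autoneg
 * pause settings when flow control autoneg is enabled and the forced pause
 * settings otherwise.
 */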
9903 | static void |
9904 | bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) |
9905 | { |
9906 | if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { |
9907 | if (bp->hwrm_spec_code >= 0x10201) |
9908 | req->auto_pause = |
9909 | PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; |
9910 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) |
9911 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; |
9912 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) |
9913 | req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; |
9914 | req->enables |= |
9915 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); |
9916 | } else { |
9917 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) |
9918 | req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; |
9919 | if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) |
9920 | req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; |
9921 | req->enables |= |
9922 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); |
9923 | if (bp->hwrm_spec_code >= 0x10201) { |
9924 | req->auto_pause = req->force_pause; |
9925 | req->enables |= cpu_to_le32( |
9926 | PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); |
9927 | } |
9928 | } |
9929 | } |
9930 | |
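/* Fill in the speed portion of a PORT_PHY_CFG request. With autoneg
 * enabled, advertise the configured NRZ and PAM4 speed masks and restart
 * autoneg; otherwise force the requested link speed in the requested
 * signal mode.
 */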
9931 | static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) |
9932 | { |
9933 | if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { |
9934 | req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; |
9935 | if (bp->link_info.advertising) { |
9936 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); |
9937 | req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); |
9938 | } |
9939 | if (bp->link_info.advertising_pam4) { |
9940 | req->enables |= |
9941 | cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); |
9942 | req->auto_link_pam4_speed_mask = |
9943 | cpu_to_le16(bp->link_info.advertising_pam4); |
9944 | } |
9945 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); |
9946 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); |
9947 | } else { |
9948 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); |
9949 | if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { |
9950 | req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
9951 | req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); |
9952 | } else { |
9953 | req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); |
9954 | } |
9955 | } |
9956 | |
9957 | /* tell chimp that the setting takes effect immediately */ |
9958 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); |
9959 | } |
9960 | |
9961 | int bnxt_hwrm_set_pause(struct bnxt *bp) |
9962 | { |
9963 | struct hwrm_port_phy_cfg_input *req; |
9964 | int rc; |
9965 | |
9966 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
9967 | if (rc) |
9968 | return rc; |
9969 | |
9970 | bnxt_hwrm_set_pause_common(bp, req); |
9971 | |
9972 | if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || |
9973 | bp->link_info.force_link_chng) |
9974 | bnxt_hwrm_set_link_common(bp, req); |
9975 | |
9976 | rc = hwrm_req_send(bp, req); |
9977 | if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { |
		/* Since changing the pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon successful return of the phy_cfg command.
		 */
9982 | bp->link_info.pause = |
9983 | bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; |
9984 | bp->link_info.auto_pause_setting = 0; |
9985 | if (!bp->link_info.force_link_chng) |
9986 | bnxt_report_link(bp); |
9987 | } |
9988 | bp->link_info.force_link_chng = false; |
9989 | return rc; |
9990 | } |
9991 | |
9992 | static void bnxt_hwrm_set_eee(struct bnxt *bp, |
9993 | struct hwrm_port_phy_cfg_input *req) |
9994 | { |
9995 | struct ethtool_eee *eee = &bp->eee; |
9996 | |
9997 | if (eee->eee_enabled) { |
9998 | u16 eee_speeds; |
9999 | u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; |
10000 | |
10001 | if (eee->tx_lpi_enabled) |
10002 | flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; |
10003 | else |
10004 | flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; |
10005 | |
10006 | req->flags |= cpu_to_le32(flags); |
10007 | eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); |
10008 | req->eee_link_speed_mask = cpu_to_le16(eee_speeds); |
10009 | req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); |
10010 | } else { |
10011 | req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); |
10012 | } |
10013 | } |
10014 | |
10015 | int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) |
10016 | { |
10017 | struct hwrm_port_phy_cfg_input *req; |
10018 | int rc; |
10019 | |
10020 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
10021 | if (rc) |
10022 | return rc; |
10023 | |
10024 | if (set_pause) |
10025 | bnxt_hwrm_set_pause_common(bp, req); |
10026 | |
10027 | bnxt_hwrm_set_link_common(bp, req); |
10028 | |
10029 | if (set_eee) |
10030 | bnxt_hwrm_set_eee(bp, req); |
10031 | return hwrm_req_send(bp, req); |
10032 | } |
10033 | |
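/* Force the link down on close. Only done on a single-function PF, and
 * skipped while VFs are active unless the firmware manages link down for
 * the driver.
 */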
10034 | static int bnxt_hwrm_shutdown_link(struct bnxt *bp) |
10035 | { |
10036 | struct hwrm_port_phy_cfg_input *req; |
10037 | int rc; |
10038 | |
10039 | if (!BNXT_SINGLE_PF(bp)) |
10040 | return 0; |
10041 | |
	if (pci_num_vf(bp->pdev) &&
10043 | !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) |
10044 | return 0; |
10045 | |
10046 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); |
10047 | if (rc) |
10048 | return rc; |
10049 | |
10050 | req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); |
10051 | rc = hwrm_req_send(bp, req); |
10052 | if (!rc) { |
10053 | mutex_lock(&bp->link_lock); |
		/* The device is not obliged to bring the link down in
		 * certain scenarios, even when forced. Setting the state
		 * to unknown is consistent with driver startup and will
		 * force the link state to be reported during subsequent
		 * open based on PORT_PHY_QCFG.
		 */
		bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
		mutex_unlock(&bp->link_lock);
10061 | } |
10062 | return rc; |
10063 | } |
10064 | |
10065 | static int bnxt_fw_reset_via_optee(struct bnxt *bp) |
10066 | { |
10067 | #ifdef CONFIG_TEE_BNXT_FW |
10068 | int rc = tee_bnxt_fw_load(); |
10069 | |
10070 | if (rc) |
		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
10072 | |
10073 | return rc; |
10074 | #else |
	netdev_err(bp->dev, "OP-TEE not supported\n");
10076 | return -ENODEV; |
10077 | #endif |
10078 | } |
10079 | |
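/* Poll the firmware health status register until the firmware is no
 * longer booting or recovering, and request a reset via OP-TEE if the
 * firmware crashed with no master function left to recover it.
 */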
10080 | static int bnxt_try_recover_fw(struct bnxt *bp) |
10081 | { |
10082 | if (bp->fw_health && bp->fw_health->status_reliable) { |
10083 | int retry = 0, rc; |
10084 | u32 sts; |
10085 | |
10086 | do { |
10087 | sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
10088 | rc = bnxt_hwrm_poll(bp); |
10089 | if (!BNXT_FW_IS_BOOTING(sts) && |
10090 | !BNXT_FW_IS_RECOVERING(sts)) |
10091 | break; |
10092 | retry++; |
10093 | } while (rc == -EBUSY && retry < BNXT_FW_RETRY); |
10094 | |
10095 | if (!BNXT_FW_IS_HEALTHY(sts)) { |
			netdev_err(bp->dev,
				   "Firmware not responding, status: 0x%x\n",
				   sts);
10099 | rc = -ENODEV; |
10100 | } |
10101 | if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { |
			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
10103 | return bnxt_fw_reset_via_optee(bp); |
10104 | } |
10105 | return rc; |
10106 | } |
10107 | |
10108 | return -ENODEV; |
10109 | } |
10110 | |
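/* Forget the cached resource reservations. Outside of a firmware reset,
 * also clear the requested TX/RX ring counts.
 */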
10111 | static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) |
10112 | { |
10113 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
10114 | |
10115 | if (!BNXT_NEW_RM(bp)) |
10116 | return; /* no resource reservations required */ |
10117 | |
10118 | hw_resc->resv_cp_rings = 0; |
10119 | hw_resc->resv_stat_ctxs = 0; |
10120 | hw_resc->resv_irqs = 0; |
10121 | hw_resc->resv_tx_rings = 0; |
10122 | hw_resc->resv_rx_rings = 0; |
10123 | hw_resc->resv_hw_ring_grps = 0; |
10124 | hw_resc->resv_vnics = 0; |
10125 | if (!fw_reset) { |
10126 | bp->tx_nr_rings = 0; |
10127 | bp->rx_nr_rings = 0; |
10128 | } |
10129 | } |
10130 | |
10131 | int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) |
10132 | { |
10133 | int rc; |
10134 | |
10135 | if (!BNXT_NEW_RM(bp)) |
10136 | return 0; /* no resource reservations required */ |
10137 | |
	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
	if (rc)
		netdev_err(bp->dev, "resc_qcaps failed\n");
10141 | |
10142 | bnxt_clear_reservations(bp, fw_reset); |
10143 | |
10144 | return rc; |
10145 | } |
10146 | |
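/* Notify the firmware of an interface state change via FUNC_DRV_IF_CHANGE.
 * On the up transition, the response flags indicate whether resources have
 * changed or the firmware has gone through a hot reset, in which case the
 * driver re-initializes firmware state and the interrupt mode.
 */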
10147 | static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) |
10148 | { |
10149 | struct hwrm_func_drv_if_change_output *resp; |
10150 | struct hwrm_func_drv_if_change_input *req; |
10151 | bool fw_reset = !bp->irq_tbl; |
10152 | bool resc_reinit = false; |
10153 | int rc, retry = 0; |
10154 | u32 flags = 0; |
10155 | |
10156 | if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) |
10157 | return 0; |
10158 | |
10159 | rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); |
10160 | if (rc) |
10161 | return rc; |
10162 | |
10163 | if (up) |
10164 | req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); |
10165 | resp = hwrm_req_hold(bp, req); |
10166 | |
	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10168 | while (retry < BNXT_FW_IF_RETRY) { |
10169 | rc = hwrm_req_send(bp, req); |
10170 | if (rc != -EAGAIN) |
10171 | break; |
10172 | |
		msleep(50);
10174 | retry++; |
10175 | } |
10176 | |
10177 | if (rc == -EAGAIN) { |
10178 | hwrm_req_drop(bp, req); |
10179 | return rc; |
10180 | } else if (!rc) { |
10181 | flags = le32_to_cpu(resp->flags); |
10182 | } else if (up) { |
10183 | rc = bnxt_try_recover_fw(bp); |
10184 | fw_reset = true; |
10185 | } |
10186 | hwrm_req_drop(bp, req); |
10187 | if (rc) |
10188 | return rc; |
10189 | |
10190 | if (!up) { |
10191 | bnxt_inv_fw_health_reg(bp); |
10192 | return 0; |
10193 | } |
10194 | |
10195 | if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) |
10196 | resc_reinit = true; |
10197 | if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || |
10198 | test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) |
10199 | fw_reset = true; |
10200 | else |
10201 | bnxt_remap_fw_health_regs(bp); |
10202 | |
10203 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { |
		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10206 | return -ENODEV; |
10207 | } |
10208 | if (resc_reinit || fw_reset) { |
10209 | if (fw_reset) { |
			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10211 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
10212 | bnxt_ulp_stop(bp); |
10213 | bnxt_free_ctx_mem(bp); |
			kfree(bp->ctx);
10215 | bp->ctx = NULL; |
10216 | bnxt_dcb_free(bp); |
10217 | rc = bnxt_fw_init_one(bp); |
10218 | if (rc) { |
				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10221 | return rc; |
10222 | } |
10223 | bnxt_clear_int_mode(bp); |
10224 | rc = bnxt_init_int_mode(bp); |
10225 | if (rc) { |
				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
				netdev_err(bp->dev, "init int mode failed\n");
10228 | return rc; |
10229 | } |
10230 | } |
10231 | rc = bnxt_cancel_reservations(bp, fw_reset); |
10232 | } |
10233 | return rc; |
10234 | } |
10235 | |
10236 | static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) |
10237 | { |
10238 | struct hwrm_port_led_qcaps_output *resp; |
10239 | struct hwrm_port_led_qcaps_input *req; |
10240 | struct bnxt_pf_info *pf = &bp->pf; |
10241 | int rc; |
10242 | |
10243 | bp->num_leds = 0; |
10244 | if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) |
10245 | return 0; |
10246 | |
10247 | rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); |
10248 | if (rc) |
10249 | return rc; |
10250 | |
10251 | req->port_id = cpu_to_le16(pf->port_id); |
10252 | resp = hwrm_req_hold(bp, req); |
10253 | rc = hwrm_req_send(bp, req); |
10254 | if (rc) { |
10255 | hwrm_req_drop(bp, req); |
10256 | return rc; |
10257 | } |
10258 | if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { |
10259 | int i; |
10260 | |
10261 | bp->num_leds = resp->num_leds; |
10262 | memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * |
10263 | bp->num_leds); |
10264 | for (i = 0; i < bp->num_leds; i++) { |
10265 | struct bnxt_led_info *led = &bp->leds[i]; |
10266 | __le16 caps = led->led_state_caps; |
10267 | |
10268 | if (!led->led_group_id || |
10269 | !BNXT_LED_ALT_BLINK_CAP(caps)) { |
10270 | bp->num_leds = 0; |
10271 | break; |
10272 | } |
10273 | } |
10274 | } |
10275 | hwrm_req_drop(bp, req); |
10276 | return 0; |
10277 | } |
10278 | |
10279 | int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) |
10280 | { |
10281 | struct hwrm_wol_filter_alloc_output *resp; |
10282 | struct hwrm_wol_filter_alloc_input *req; |
10283 | int rc; |
10284 | |
10285 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); |
10286 | if (rc) |
10287 | return rc; |
10288 | |
10289 | req->port_id = cpu_to_le16(bp->pf.port_id); |
10290 | req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; |
10291 | req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); |
10292 | memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); |
10293 | |
10294 | resp = hwrm_req_hold(bp, req); |
10295 | rc = hwrm_req_send(bp, req); |
10296 | if (!rc) |
10297 | bp->wol_filter_id = resp->wol_filter_id; |
10298 | hwrm_req_drop(bp, req); |
10299 | return rc; |
10300 | } |
10301 | |
10302 | int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) |
10303 | { |
10304 | struct hwrm_wol_filter_free_input *req; |
10305 | int rc; |
10306 | |
10307 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); |
10308 | if (rc) |
10309 | return rc; |
10310 | |
10311 | req->port_id = cpu_to_le16(bp->pf.port_id); |
10312 | req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); |
10313 | req->wol_filter_id = bp->wol_filter_id; |
10314 | |
10315 | return hwrm_req_send(bp, req); |
10316 | } |
10317 | |
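/* Query one WoL filter starting at @handle and cache a magic-packet filter
 * if one is found. Returns the next handle for iteration.
 */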
10318 | static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) |
10319 | { |
10320 | struct hwrm_wol_filter_qcfg_output *resp; |
10321 | struct hwrm_wol_filter_qcfg_input *req; |
10322 | u16 next_handle = 0; |
10323 | int rc; |
10324 | |
10325 | rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); |
10326 | if (rc) |
10327 | return rc; |
10328 | |
10329 | req->port_id = cpu_to_le16(bp->pf.port_id); |
10330 | req->handle = cpu_to_le16(handle); |
10331 | resp = hwrm_req_hold(bp, req); |
10332 | rc = hwrm_req_send(bp, req); |
10333 | if (!rc) { |
10334 | next_handle = le16_to_cpu(resp->next_handle); |
10335 | if (next_handle != 0) { |
10336 | if (resp->wol_type == |
10337 | WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { |
10338 | bp->wol = 1; |
10339 | bp->wol_filter_id = resp->wol_filter_id; |
10340 | } |
10341 | } |
10342 | } |
10343 | hwrm_req_drop(bp, req); |
10344 | return next_handle; |
10345 | } |
10346 | |
10347 | static void bnxt_get_wol_settings(struct bnxt *bp) |
10348 | { |
10349 | u16 handle = 0; |
10350 | |
10351 | bp->wol = 0; |
10352 | if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) |
10353 | return; |
10354 | |
10355 | do { |
10356 | handle = bnxt_hwrm_get_wol_fltrs(bp, handle); |
10357 | } while (handle && handle != 0xffff); |
10358 | } |
10359 | |
10360 | static bool bnxt_eee_config_ok(struct bnxt *bp) |
10361 | { |
10362 | struct ethtool_eee *eee = &bp->eee; |
10363 | struct bnxt_link_info *link_info = &bp->link_info; |
10364 | |
10365 | if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) |
10366 | return true; |
10367 | |
10368 | if (eee->eee_enabled) { |
10369 | u32 advertising = |
10370 | _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); |
10371 | |
10372 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
10373 | eee->eee_enabled = 0; |
10374 | return false; |
10375 | } |
10376 | if (eee->advertised & ~advertising) { |
10377 | eee->advertised = advertising & eee->supported; |
10378 | return false; |
10379 | } |
10380 | } |
10381 | return true; |
10382 | } |
10383 | |
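/* Re-read the PHY configuration and, on a single-function PF, reapply any
 * requested pause, speed, or EEE settings that differ from what the
 * firmware reports. Called with the link_lock mutex held.
 */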
10384 | static int bnxt_update_phy_setting(struct bnxt *bp) |
10385 | { |
10386 | int rc; |
10387 | bool update_link = false; |
10388 | bool update_pause = false; |
10389 | bool update_eee = false; |
10390 | struct bnxt_link_info *link_info = &bp->link_info; |
10391 | |
	rc = bnxt_update_link(bp, true);
	if (rc) {
		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10395 | rc); |
10396 | return rc; |
10397 | } |
10398 | if (!BNXT_SINGLE_PF(bp)) |
10399 | return 0; |
10400 | |
10401 | if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && |
10402 | (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != |
10403 | link_info->req_flow_ctrl) |
10404 | update_pause = true; |
10405 | if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && |
10406 | link_info->force_pause_setting != link_info->req_flow_ctrl) |
10407 | update_pause = true; |
10408 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
10409 | if (BNXT_AUTO_MODE(link_info->auto_mode)) |
10410 | update_link = true; |
10411 | if (bnxt_force_speed_updated(link_info)) |
10412 | update_link = true; |
10413 | if (link_info->req_duplex != link_info->duplex_setting) |
10414 | update_link = true; |
10415 | } else { |
10416 | if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) |
10417 | update_link = true; |
10418 | if (bnxt_auto_speed_updated(link_info)) |
10419 | update_link = true; |
10420 | } |
10421 | |
	/* The last close may have shut down the link, so we need to call
	 * PHY_CFG to bring it back up.
	 */
10425 | if (!BNXT_LINK_IS_UP(bp)) |
10426 | update_link = true; |
10427 | |
10428 | if (!bnxt_eee_config_ok(bp)) |
10429 | update_eee = true; |
10430 | |
10431 | if (update_link) |
		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10433 | else if (update_pause) |
10434 | rc = bnxt_hwrm_set_pause(bp); |
10435 | if (rc) { |
		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10437 | rc); |
10438 | return rc; |
10439 | } |
10440 | |
10441 | return rc; |
10442 | } |
10443 | |
/* Common routine to pre-map certain register blocks to different GRC windows.
 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 * in the PF and 3 windows in the VF can be customized to map in different
 * register blocks.
10448 | */ |
10449 | static void bnxt_preset_reg_win(struct bnxt *bp) |
10450 | { |
10451 | if (BNXT_PF(bp)) { |
10452 | /* CAG registers map to GRC window #4 */ |
		writel(BNXT_CAG_REG_BASE,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10455 | } |
10456 | } |
10457 | |
10458 | static int bnxt_init_dflt_ring_mode(struct bnxt *bp); |
10459 | |
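/* Attempt a full firmware re-initialization after a previous open was
 * aborted due to a firmware error, restoring the interrupt mode on
 * success.
 */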
10460 | static int bnxt_reinit_after_abort(struct bnxt *bp) |
10461 | { |
10462 | int rc; |
10463 | |
10464 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
10465 | return -EBUSY; |
10466 | |
10467 | if (bp->dev->reg_state == NETREG_UNREGISTERED) |
10468 | return -ENODEV; |
10469 | |
10470 | rc = bnxt_fw_init_one(bp); |
10471 | if (!rc) { |
10472 | bnxt_clear_int_mode(bp); |
10473 | rc = bnxt_init_int_mode(bp); |
10474 | if (!rc) { |
			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10477 | } |
10478 | } |
10479 | return rc; |
10480 | } |
10481 | |
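/* Bring the NIC fully up: reserve rings, allocate memory, set up NAPI and
 * IRQs when @irq_re_init is set, program the NIC, optionally reapply PHY
 * settings, then enable interrupts and the TX queues.
 */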
10482 | static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
10483 | { |
10484 | int rc = 0; |
10485 | |
10486 | bnxt_preset_reg_win(bp); |
	netif_carrier_off(bp->dev);
10488 | if (irq_re_init) { |
10489 | /* Reserve rings now if none were reserved at driver probe. */ |
10490 | rc = bnxt_init_dflt_ring_mode(bp); |
10491 | if (rc) { |
10492 | netdev_err(dev: bp->dev, format: "Failed to reserve default rings at open\n" ); |
10493 | return rc; |
10494 | } |
10495 | } |
10496 | rc = bnxt_reserve_rings(bp, irq_re_init); |
10497 | if (rc) |
10498 | return rc; |
10499 | if ((bp->flags & BNXT_FLAG_RFS) && |
10500 | !(bp->flags & BNXT_FLAG_USING_MSIX)) { |
10501 | /* disable RFS if falling back to INTA */ |
10502 | bp->dev->hw_features &= ~NETIF_F_NTUPLE; |
10503 | bp->flags &= ~BNXT_FLAG_RFS; |
10504 | } |
10505 | |
10506 | rc = bnxt_alloc_mem(bp, irq_re_init); |
10507 | if (rc) { |
10508 | netdev_err(dev: bp->dev, format: "bnxt_alloc_mem err: %x\n" , rc); |
10509 | goto open_err_free_mem; |
10510 | } |
10511 | |
10512 | if (irq_re_init) { |
10513 | bnxt_init_napi(bp); |
10514 | rc = bnxt_request_irq(bp); |
10515 | if (rc) { |
10516 | netdev_err(dev: bp->dev, format: "bnxt_request_irq err: %x\n" , rc); |
10517 | goto open_err_irq; |
10518 | } |
10519 | } |
10520 | |
10521 | rc = bnxt_init_nic(bp, irq_re_init); |
10522 | if (rc) { |
10523 | netdev_err(dev: bp->dev, format: "bnxt_init_nic err: %x\n" , rc); |
10524 | goto open_err_irq; |
10525 | } |
10526 | |
10527 | bnxt_enable_napi(bp); |
10528 | bnxt_debug_dev_init(bp); |
10529 | |
10530 | if (link_re_init) { |
10531 | mutex_lock(&bp->link_lock); |
10532 | rc = bnxt_update_phy_setting(bp); |
		mutex_unlock(&bp->link_lock);
10534 | if (rc) { |
10535 | netdev_warn(dev: bp->dev, format: "failed to update phy settings\n" ); |
10536 | if (BNXT_SINGLE_PF(bp)) { |
10537 | bp->link_info.phy_retry = true; |
10538 | bp->link_info.phy_retry_expires = |
10539 | jiffies + 5 * HZ; |
10540 | } |
10541 | } |
10542 | } |
10543 | |
10544 | if (irq_re_init) |
		udp_tunnel_nic_reset_ntf(bp->dev);
10546 | |
10547 | if (bp->tx_nr_rings_xdp < num_possible_cpus()) { |
10548 | if (!static_key_enabled(&bnxt_xdp_locking_key)) |
10549 | static_branch_enable(&bnxt_xdp_locking_key); |
10550 | } else if (static_key_enabled(&bnxt_xdp_locking_key)) { |
10551 | static_branch_disable(&bnxt_xdp_locking_key); |
10552 | } |
	set_bit(BNXT_STATE_OPEN, &bp->state);
10554 | bnxt_enable_int(bp); |
10555 | /* Enable TX queues */ |
10556 | bnxt_tx_enable(bp); |
	mod_timer(&bp->timer, jiffies + bp->current_interval);
10558 | /* Poll link status and check for SFP+ module status */ |
10559 | mutex_lock(&bp->link_lock); |
10560 | bnxt_get_port_module_status(bp); |
	mutex_unlock(&bp->link_lock);
10562 | |
10563 | /* VF-reps may need to be re-opened after the PF is re-opened */ |
10564 | if (BNXT_PF(bp)) |
10565 | bnxt_vf_reps_open(bp); |
	bnxt_ptp_init_rtc(bp, true);
10567 | bnxt_ptp_cfg_tstamp_filters(bp); |
10568 | return 0; |
10569 | |
10570 | open_err_irq: |
10571 | bnxt_del_napi(bp); |
10572 | |
10573 | open_err_free_mem: |
10574 | bnxt_free_skbs(bp); |
10575 | bnxt_free_irq(bp); |
	bnxt_free_mem(bp, true);
10577 | return rc; |
10578 | } |
10579 | |
10580 | /* rtnl_lock held */ |
10581 | int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
10582 | { |
10583 | int rc = 0; |
10584 | |
10585 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) |
10586 | rc = -EIO; |
10587 | if (!rc) |
10588 | rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); |
10589 | if (rc) { |
10590 | netdev_err(dev: bp->dev, format: "nic open fail (rc: %x)\n" , rc); |
10591 | dev_close(dev: bp->dev); |
10592 | } |
10593 | return rc; |
10594 | } |
10595 | |
/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10597 | * NAPI, IRQ, and TX are not enabled. This is mainly used for offline |
10598 | * self tests. |
10599 | */ |
10600 | int bnxt_half_open_nic(struct bnxt *bp) |
10601 | { |
10602 | int rc = 0; |
10603 | |
10604 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
10605 | netdev_err(dev: bp->dev, format: "A previous firmware reset has not completed, aborting half open\n" ); |
10606 | rc = -ENODEV; |
10607 | goto half_open_err; |
10608 | } |
10609 | |
	rc = bnxt_alloc_mem(bp, true);
	if (rc) {
		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10613 | goto half_open_err; |
10614 | } |
	set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
	rc = bnxt_init_nic(bp, true);
	if (rc) {
		clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10620 | goto half_open_err; |
10621 | } |
10622 | return 0; |
10623 | |
10624 | half_open_err: |
10625 | bnxt_free_skbs(bp); |
	bnxt_free_mem(bp, true);
	dev_close(bp->dev);
10628 | return rc; |
10629 | } |
10630 | |
10631 | /* rtnl_lock held, this call can only be made after a previous successful |
10632 | * call to bnxt_half_open_nic(). |
10633 | */ |
10634 | void bnxt_half_close_nic(struct bnxt *bp) |
10635 | { |
	bnxt_hwrm_resource_free(bp, false, true);
	bnxt_free_skbs(bp);
	bnxt_free_mem(bp, true);
	clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10640 | } |
10641 | |
10642 | void bnxt_reenable_sriov(struct bnxt *bp) |
10643 | { |
10644 | if (BNXT_PF(bp)) { |
10645 | struct bnxt_pf_info *pf = &bp->pf; |
10646 | int n = pf->active_vfs; |
10647 | |
10648 | if (n) |
			bnxt_cfg_hw_sriov(bp, &n, true);
10650 | } |
10651 | } |
10652 | |
10653 | static int bnxt_open(struct net_device *dev) |
10654 | { |
10655 | struct bnxt *bp = netdev_priv(dev); |
10656 | int rc; |
10657 | |
10658 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
10659 | rc = bnxt_reinit_after_abort(bp); |
10660 | if (rc) { |
10661 | if (rc == -EBUSY) |
10662 | netdev_err(dev: bp->dev, format: "A previous firmware reset has not completed, aborting\n" ); |
10663 | else |
10664 | netdev_err(dev: bp->dev, format: "Failed to reinitialize after aborted firmware reset\n" ); |
10665 | return -ENODEV; |
10666 | } |
10667 | } |
10668 | |
	rc = bnxt_hwrm_if_change(bp, true);
10670 | if (rc) |
10671 | return rc; |
10672 | |
	rc = __bnxt_open_nic(bp, true, true);
	if (rc) {
		bnxt_hwrm_if_change(bp, false);
	} else {
		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10678 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
				bnxt_ulp_start(bp, 0);
10680 | bnxt_reenable_sriov(bp); |
10681 | } |
10682 | } |
10683 | } |
10684 | |
10685 | return rc; |
10686 | } |
10687 | |
10688 | static bool bnxt_drv_busy(struct bnxt *bp) |
10689 | { |
10690 | return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || |
10691 | test_bit(BNXT_STATE_READ_STATS, &bp->state)); |
10692 | } |
10693 | |
10694 | static void bnxt_get_ring_stats(struct bnxt *bp, |
10695 | struct rtnl_link_stats64 *stats); |
10696 | |
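/* Tear down the data path: stop TX, wait for in-flight stats readers and
 * slow-path work to finish, shut down the rings, then free the rings,
 * IRQs, and memory as requested.
 */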
10697 | static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, |
10698 | bool link_re_init) |
10699 | { |
10700 | /* Close the VF-reps before closing PF */ |
10701 | if (BNXT_PF(bp)) |
10702 | bnxt_vf_reps_close(bp); |
10703 | |
	/* Change device state to avoid TX queue wake-ups */
10705 | bnxt_tx_disable(bp); |
10706 | |
	clear_bit(BNXT_STATE_OPEN, &bp->state);
10708 | smp_mb__after_atomic(); |
10709 | while (bnxt_drv_busy(bp)) |
		msleep(20);
10711 | |
10712 | /* Flush rings and disable interrupts */ |
10713 | bnxt_shutdown_nic(bp, irq_re_init); |
10714 | |
10715 | /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ |
10716 | |
10717 | bnxt_debug_dev_exit(bp); |
10718 | bnxt_disable_napi(bp); |
	del_timer_sync(&bp->timer);
10720 | bnxt_free_skbs(bp); |
10721 | |
10722 | /* Save ring stats before shutdown */ |
10723 | if (bp->bnapi && irq_re_init) { |
		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
10726 | } |
10727 | if (irq_re_init) { |
10728 | bnxt_free_irq(bp); |
10729 | bnxt_del_napi(bp); |
10730 | } |
10731 | bnxt_free_mem(bp, irq_re_init); |
10732 | } |
10733 | |
10734 | int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) |
10735 | { |
10736 | int rc = 0; |
10737 | |
10738 | if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
10739 | /* If we get here, it means firmware reset is in progress |
10740 | * while we are trying to close. We can safely proceed with |
10741 | * the close because we are holding rtnl_lock(). Some firmware |
10742 | * messages may fail as we proceed to close. We set the |
10743 | * ABORT_ERR flag here so that the FW reset thread will later |
10744 | * abort when it gets the rtnl_lock() and sees the flag. |
10745 | */ |
10746 | netdev_warn(dev: bp->dev, format: "FW reset in progress during close, FW reset will be aborted\n" ); |
10747 | set_bit(BNXT_STATE_ABORT_ERR, addr: &bp->state); |
10748 | } |
10749 | |
10750 | #ifdef CONFIG_BNXT_SRIOV |
10751 | if (bp->sriov_cfg) { |
10752 | rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, |
10753 | !bp->sriov_cfg, |
10754 | BNXT_SRIOV_CFG_WAIT_TMO); |
10755 | if (rc) |
10756 | netdev_warn(dev: bp->dev, format: "timeout waiting for SRIOV config operation to complete!\n" ); |
10757 | } |
10758 | #endif |
10759 | __bnxt_close_nic(bp, irq_re_init, link_re_init); |
10760 | return rc; |
10761 | } |
10762 | |
10763 | static int bnxt_close(struct net_device *dev) |
10764 | { |
10765 | struct bnxt *bp = netdev_priv(dev); |
10766 | |
	bnxt_close_nic(bp, true, true);
	bnxt_hwrm_shutdown_link(bp);
	bnxt_hwrm_if_change(bp, false);
10770 | return 0; |
10771 | } |
10772 | |
10773 | static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, |
10774 | u16 *val) |
10775 | { |
10776 | struct hwrm_port_phy_mdio_read_output *resp; |
10777 | struct hwrm_port_phy_mdio_read_input *req; |
10778 | int rc; |
10779 | |
10780 | if (bp->hwrm_spec_code < 0x10a00) |
10781 | return -EOPNOTSUPP; |
10782 | |
10783 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); |
10784 | if (rc) |
10785 | return rc; |
10786 | |
10787 | req->port_id = cpu_to_le16(bp->pf.port_id); |
10788 | req->phy_addr = phy_addr; |
10789 | req->reg_addr = cpu_to_le16(reg & 0x1f); |
	if (mdio_phy_id_is_c45(phy_addr)) {
		req->cl45_mdio = 1;
		req->phy_addr = mdio_phy_id_prtad(phy_addr);
		req->dev_addr = mdio_phy_id_devad(phy_addr);
10794 | req->reg_addr = cpu_to_le16(reg); |
10795 | } |
10796 | |
10797 | resp = hwrm_req_hold(bp, req); |
10798 | rc = hwrm_req_send(bp, req); |
10799 | if (!rc) |
10800 | *val = le16_to_cpu(resp->reg_data); |
10801 | hwrm_req_drop(bp, req); |
10802 | return rc; |
10803 | } |
10804 | |
10805 | static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, |
10806 | u16 val) |
10807 | { |
10808 | struct hwrm_port_phy_mdio_write_input *req; |
10809 | int rc; |
10810 | |
10811 | if (bp->hwrm_spec_code < 0x10a00) |
10812 | return -EOPNOTSUPP; |
10813 | |
10814 | rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); |
10815 | if (rc) |
10816 | return rc; |
10817 | |
10818 | req->port_id = cpu_to_le16(bp->pf.port_id); |
10819 | req->phy_addr = phy_addr; |
10820 | req->reg_addr = cpu_to_le16(reg & 0x1f); |
	if (mdio_phy_id_is_c45(phy_addr)) {
		req->cl45_mdio = 1;
		req->phy_addr = mdio_phy_id_prtad(phy_addr);
		req->dev_addr = mdio_phy_id_devad(phy_addr);
10825 | req->reg_addr = cpu_to_le16(reg); |
10826 | } |
10827 | req->reg_data = cpu_to_le16(val); |
10828 | |
10829 | return hwrm_req_send(bp, req); |
10830 | } |
10831 | |
10832 | /* rtnl_lock held */ |
10833 | static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
10834 | { |
	struct mii_ioctl_data *mdio = if_mii(ifr);
10836 | struct bnxt *bp = netdev_priv(dev); |
10837 | int rc; |
10838 | |
10839 | switch (cmd) { |
10840 | case SIOCGMIIPHY: |
10841 | mdio->phy_id = bp->link_info.phy_addr; |
10842 | |
10843 | fallthrough; |
10844 | case SIOCGMIIREG: { |
10845 | u16 mii_regval = 0; |
10846 | |
10847 | if (!netif_running(dev)) |
10848 | return -EAGAIN; |
10849 | |
		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
					     &mii_regval);
10852 | mdio->val_out = mii_regval; |
10853 | return rc; |
10854 | } |
10855 | |
10856 | case SIOCSMIIREG: |
10857 | if (!netif_running(dev)) |
10858 | return -EAGAIN; |
10859 | |
		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
						mdio->val_in);
10862 | |
10863 | case SIOCSHWTSTAMP: |
10864 | return bnxt_hwtstamp_set(dev, ifr); |
10865 | |
10866 | case SIOCGHWTSTAMP: |
10867 | return bnxt_hwtstamp_get(dev, ifr); |
10868 | |
10869 | default: |
10870 | /* do nothing */ |
10871 | break; |
10872 | } |
10873 | return -EOPNOTSUPP; |
10874 | } |
10875 | |
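/* Aggregate the per-ring hardware and software counters into
 * rtnl_link_stats64.
 */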
10876 | static void bnxt_get_ring_stats(struct bnxt *bp, |
10877 | struct rtnl_link_stats64 *stats) |
10878 | { |
10879 | int i; |
10880 | |
10881 | for (i = 0; i < bp->cp_nr_rings; i++) { |
10882 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
10883 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
10884 | u64 *sw = cpr->stats.sw_stats; |
10885 | |
10886 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); |
10887 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
10888 | stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); |
10889 | |
10890 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); |
10891 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); |
10892 | stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); |
10893 | |
10894 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); |
10895 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); |
10896 | stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); |
10897 | |
10898 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); |
10899 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); |
10900 | stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); |
10901 | |
10902 | stats->rx_missed_errors += |
10903 | BNXT_GET_RING_STATS64(sw, rx_discard_pkts); |
10904 | |
10905 | stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); |
10906 | |
10907 | stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); |
10908 | |
10909 | stats->rx_dropped += |
10910 | cpr->sw_stats.rx.rx_netpoll_discards + |
10911 | cpr->sw_stats.rx.rx_oom_discards; |
10912 | } |
10913 | } |
10914 | |
10915 | static void bnxt_add_prev_stats(struct bnxt *bp, |
10916 | struct rtnl_link_stats64 *stats) |
10917 | { |
10918 | struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; |
10919 | |
10920 | stats->rx_packets += prev_stats->rx_packets; |
10921 | stats->tx_packets += prev_stats->tx_packets; |
10922 | stats->rx_bytes += prev_stats->rx_bytes; |
10923 | stats->tx_bytes += prev_stats->tx_bytes; |
10924 | stats->rx_missed_errors += prev_stats->rx_missed_errors; |
10925 | stats->multicast += prev_stats->multicast; |
10926 | stats->rx_dropped += prev_stats->rx_dropped; |
10927 | stats->tx_dropped += prev_stats->tx_dropped; |
10928 | } |
10929 | |
10930 | static void |
10931 | bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
10932 | { |
10933 | struct bnxt *bp = netdev_priv(dev); |
10934 | |
	set_bit(BNXT_STATE_READ_STATS, &bp->state);
10936 | /* Make sure bnxt_close_nic() sees that we are reading stats before |
10937 | * we check the BNXT_STATE_OPEN flag. |
10938 | */ |
10939 | smp_mb__after_atomic(); |
10940 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10942 | *stats = bp->net_stats_prev; |
10943 | return; |
10944 | } |
10945 | |
10946 | bnxt_get_ring_stats(bp, stats); |
10947 | bnxt_add_prev_stats(bp, stats); |
10948 | |
10949 | if (bp->flags & BNXT_FLAG_PORT_STATS) { |
10950 | u64 *rx = bp->port_stats.sw_stats; |
10951 | u64 *tx = bp->port_stats.sw_stats + |
10952 | BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; |
10953 | |
10954 | stats->rx_crc_errors = |
10955 | BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); |
10956 | stats->rx_frame_errors = |
10957 | BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); |
10958 | stats->rx_length_errors = |
10959 | BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + |
10960 | BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + |
10961 | BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); |
10962 | stats->rx_errors = |
10963 | BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + |
10964 | BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); |
10965 | stats->collisions = |
10966 | BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); |
10967 | stats->tx_fifo_errors = |
10968 | BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); |
10969 | stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); |
10970 | } |
	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10972 | } |
10973 | |
10974 | static void bnxt_get_one_ring_err_stats(struct bnxt *bp, |
10975 | struct bnxt_total_ring_err_stats *stats, |
10976 | struct bnxt_cp_ring_info *cpr) |
10977 | { |
10978 | struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; |
10979 | u64 *hw_stats = cpr->stats.sw_stats; |
10980 | |
10981 | stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; |
10982 | stats->rx_total_resets += sw_stats->rx.rx_resets; |
10983 | stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; |
10984 | stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; |
10985 | stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; |
10986 | stats->rx_total_ring_discards += |
10987 | BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); |
10988 | stats->tx_total_resets += sw_stats->tx.tx_resets; |
10989 | stats->tx_total_ring_discards += |
10990 | BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); |
10991 | stats->total_missed_irqs += sw_stats->cmn.missed_irqs; |
10992 | } |
10993 | |
10994 | void bnxt_get_ring_err_stats(struct bnxt *bp, |
10995 | struct bnxt_total_ring_err_stats *stats) |
10996 | { |
10997 | int i; |
10998 | |
10999 | for (i = 0; i < bp->cp_nr_rings; i++) |
		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
11001 | } |
11002 | |
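/* Sync the device multicast list into the default VNIC's mc_list and
 * update *rx_mask, falling back to ALL_MCAST when the list exceeds
 * BNXT_MAX_MC_ADDRS. Returns true if the cached list changed.
 */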
11003 | static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) |
11004 | { |
11005 | struct net_device *dev = bp->dev; |
11006 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
11007 | struct netdev_hw_addr *ha; |
11008 | u8 *haddr; |
11009 | int mc_count = 0; |
11010 | bool update = false; |
11011 | int off = 0; |
11012 | |
11013 | netdev_for_each_mc_addr(ha, dev) { |
11014 | if (mc_count >= BNXT_MAX_MC_ADDRS) { |
11015 | *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
11016 | vnic->mc_list_count = 0; |
11017 | return false; |
11018 | } |
11019 | haddr = ha->addr; |
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
11021 | memcpy(vnic->mc_list + off, haddr, ETH_ALEN); |
11022 | update = true; |
11023 | } |
11024 | off += ETH_ALEN; |
11025 | mc_count++; |
11026 | } |
11027 | if (mc_count) |
11028 | *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; |
11029 | |
11030 | if (mc_count != vnic->mc_list_count) { |
11031 | vnic->mc_list_count = mc_count; |
11032 | update = true; |
11033 | } |
11034 | return update; |
11035 | } |
11036 | |
11037 | static bool bnxt_uc_list_updated(struct bnxt *bp) |
11038 | { |
11039 | struct net_device *dev = bp->dev; |
11040 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
11041 | struct netdev_hw_addr *ha; |
11042 | int off = 0; |
11043 | |
11044 | if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) |
11045 | return true; |
11046 | |
11047 | netdev_for_each_uc_addr(ha, dev) { |
		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11049 | return true; |
11050 | |
11051 | off += ETH_ALEN; |
11052 | } |
11053 | return false; |
11054 | } |
11055 | |
11056 | static void bnxt_set_rx_mode(struct net_device *dev) |
11057 | { |
11058 | struct bnxt *bp = netdev_priv(dev); |
11059 | struct bnxt_vnic_info *vnic; |
11060 | bool mc_update = false; |
11061 | bool uc_update; |
11062 | u32 mask; |
11063 | |
11064 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) |
11065 | return; |
11066 | |
11067 | vnic = &bp->vnic_info[0]; |
11068 | mask = vnic->rx_mask; |
11069 | mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | |
11070 | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | |
11071 | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | |
11072 | CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); |
11073 | |
11074 | if (dev->flags & IFF_PROMISC) |
11075 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
11076 | |
11077 | uc_update = bnxt_uc_list_updated(bp); |
11078 | |
11079 | if (dev->flags & IFF_BROADCAST) |
11080 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; |
11081 | if (dev->flags & IFF_ALLMULTI) { |
11082 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
11083 | vnic->mc_list_count = 0; |
11084 | } else if (dev->flags & IFF_MULTICAST) { |
		mc_update = bnxt_mc_list_updated(bp, &mask);
11086 | } |
11087 | |
11088 | if (mask != vnic->rx_mask || uc_update || mc_update) { |
11089 | vnic->rx_mask = mask; |
11090 | |
11091 | bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); |
11092 | } |
11093 | } |
11094 | |
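/* Apply the RX mode computed in bnxt_set_rx_mode(): re-program the unicast
 * filters if the list changed, then set the VNIC RX mask, falling back to
 * ALL_MCAST if the multicast filter setup fails.
 */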
11095 | static int bnxt_cfg_rx_mode(struct bnxt *bp) |
11096 | { |
11097 | struct net_device *dev = bp->dev; |
11098 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
11099 | struct hwrm_cfa_l2_filter_free_input *req; |
11100 | struct netdev_hw_addr *ha; |
11101 | int i, off = 0, rc; |
11102 | bool uc_update; |
11103 | |
11104 | netif_addr_lock_bh(dev); |
11105 | uc_update = bnxt_uc_list_updated(bp); |
11106 | netif_addr_unlock_bh(dev); |
11107 | |
11108 | if (!uc_update) |
11109 | goto skip_uc; |
11110 | |
11111 | rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); |
11112 | if (rc) |
11113 | return rc; |
11114 | hwrm_req_hold(bp, req); |
11115 | for (i = 1; i < vnic->uc_filter_count; i++) { |
11116 | req->l2_filter_id = vnic->fw_l2_filter_id[i]; |
11117 | |
11118 | rc = hwrm_req_send(bp, req); |
11119 | } |
11120 | hwrm_req_drop(bp, req); |
11121 | |
11122 | vnic->uc_filter_count = 1; |
11123 | |
11124 | netif_addr_lock_bh(dev); |
11125 | if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { |
11126 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
11127 | } else { |
11128 | netdev_for_each_uc_addr(ha, dev) { |
11129 | memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); |
11130 | off += ETH_ALEN; |
11131 | vnic->uc_filter_count++; |
11132 | } |
11133 | } |
11134 | netif_addr_unlock_bh(dev); |
11135 | |
11136 | for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { |
		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11138 | if (rc) { |
11139 | if (BNXT_VF(bp) && rc == -ENODEV) { |
				if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
					netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
				else
					netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11144 | rc = 0; |
11145 | } else { |
11146 | netdev_err(dev: bp->dev, format: "HWRM vnic filter failure rc: %x\n" , rc); |
11147 | } |
11148 | vnic->uc_filter_count = i; |
11149 | return rc; |
11150 | } |
11151 | } |
	if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
		netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11154 | |
11155 | skip_uc: |
11156 | if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && |
11157 | !bnxt_promisc_ok(bp)) |
11158 | vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
	if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11162 | rc); |
11163 | vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; |
11164 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
11165 | vnic->mc_list_count = 0; |
		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11167 | } |
11168 | if (rc) |
11169 | netdev_err(dev: bp->dev, format: "HWRM cfa l2 rx mask failure rc: %d\n" , |
11170 | rc); |
11171 | |
11172 | return rc; |
11173 | } |
11174 | |
11175 | static bool bnxt_can_reserve_rings(struct bnxt *bp) |
11176 | { |
11177 | #ifdef CONFIG_BNXT_SRIOV |
11178 | if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { |
11179 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
11180 | |
11181 | /* No minimum rings were provisioned by the PF. Don't |
11182 | * reserve rings by default when device is down. |
11183 | */ |
11184 | if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) |
11185 | return true; |
11186 | |
		if (!netif_running(bp->dev))
11188 | return false; |
11189 | } |
11190 | #endif |
11191 | return true; |
11192 | } |
11193 | |
/* If the chip and firmware support RFS */
11195 | static bool bnxt_rfs_supported(struct bnxt *bp) |
11196 | { |
11197 | if (bp->flags & BNXT_FLAG_CHIP_P5) { |
11198 | if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) |
11199 | return true; |
11200 | return false; |
11201 | } |
11202 | /* 212 firmware is broken for aRFS */ |
11203 | if (BNXT_FW_MAJ(bp) == 212) |
11204 | return false; |
11205 | if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) |
11206 | return true; |
11207 | if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) |
11208 | return true; |
11209 | return false; |
11210 | } |
11211 | |
11212 | /* If runtime conditions support RFS */ |
11213 | static bool bnxt_rfs_capable(struct bnxt *bp) |
11214 | { |
11215 | #ifdef CONFIG_RFS_ACCEL |
	int vnics, max_vnics, max_rss_ctxs;
11217 | |
11218 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
11219 | return bnxt_rfs_supported(bp); |
11220 | if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) |
11221 | return false; |
11222 | |
11223 | vnics = 1 + bp->rx_nr_rings; |
11224 | max_vnics = bnxt_get_max_func_vnics(bp); |
11225 | max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); |
11226 | |
11227 | /* RSS contexts not a limiting factor */ |
11228 | if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) |
11229 | max_rss_ctxs = max_vnics; |
11230 | if (vnics > max_vnics || vnics > max_rss_ctxs) { |
11231 | if (bp->rx_nr_rings > 1) |
			netdev_warn(bp->dev,
				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
				    min(max_rss_ctxs - 1, max_vnics - 1));
11235 | return false; |
11236 | } |
11237 | |
11238 | if (!BNXT_NEW_RM(bp)) |
11239 | return true; |
11240 | |
11241 | if (vnics == bp->hw_resc.resv_vnics) |
11242 | return true; |
11243 | |
	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11245 | if (vnics <= bp->hw_resc.resv_vnics) |
11246 | return true; |
11247 | |
11248 | netdev_warn(dev: bp->dev, format: "Unable to reserve resources to support NTUPLE filters.\n" ); |
11249 | bnxt_hwrm_reserve_rings(bp, tx: 0, rx: 0, grp: 0, cp: 0, stat: 0, vnic: 1); |
11250 | return false; |
11251 | #else |
11252 | return false; |
11253 | #endif |
11254 | } |
11255 | |
11256 | static netdev_features_t bnxt_fix_features(struct net_device *dev, |
11257 | netdev_features_t features) |
11258 | { |
11259 | struct bnxt *bp = netdev_priv(dev); |
11260 | netdev_features_t vlan_features; |
11261 | |
11262 | if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) |
11263 | features &= ~NETIF_F_NTUPLE; |
11264 | |
11265 | if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) |
11266 | features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
11267 | |
11268 | if (!(features & NETIF_F_GRO)) |
11269 | features &= ~NETIF_F_GRO_HW; |
11270 | |
11271 | if (features & NETIF_F_GRO_HW) |
11272 | features &= ~NETIF_F_LRO; |
11273 | |
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
11275 | * turned on or off together. |
11276 | */ |
11277 | vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; |
11278 | if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { |
11279 | if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
11280 | features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
11281 | else if (vlan_features) |
11282 | features |= BNXT_HW_FEATURE_VLAN_ALL_RX; |
11283 | } |
11284 | #ifdef CONFIG_BNXT_SRIOV |
11285 | if (BNXT_VF(bp) && bp->vf.vlan) |
11286 | features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; |
11287 | #endif |
11288 | return features; |
11289 | } |
11290 | |
11291 | static int bnxt_set_features(struct net_device *dev, netdev_features_t features) |
11292 | { |
11293 | struct bnxt *bp = netdev_priv(dev); |
11294 | u32 flags = bp->flags; |
11295 | u32 changes; |
11296 | int rc = 0; |
11297 | bool re_init = false; |
11298 | bool update_tpa = false; |
11299 | |
11300 | flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; |
11301 | if (features & NETIF_F_GRO_HW) |
11302 | flags |= BNXT_FLAG_GRO; |
11303 | else if (features & NETIF_F_LRO) |
11304 | flags |= BNXT_FLAG_LRO; |
11305 | |
11306 | if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) |
11307 | flags &= ~BNXT_FLAG_TPA; |
11308 | |
11309 | if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) |
11310 | flags |= BNXT_FLAG_STRIP_VLAN; |
11311 | |
11312 | if (features & NETIF_F_NTUPLE) |
11313 | flags |= BNXT_FLAG_RFS; |
11314 | |
11315 | changes = flags ^ bp->flags; |
11316 | if (changes & BNXT_FLAG_TPA) { |
11317 | update_tpa = true; |
11318 | if ((bp->flags & BNXT_FLAG_TPA) == 0 || |
11319 | (flags & BNXT_FLAG_TPA) == 0 || |
11320 | (bp->flags & BNXT_FLAG_CHIP_P5)) |
11321 | re_init = true; |
11322 | } |
11323 | |
11324 | if (changes & ~BNXT_FLAG_TPA) |
11325 | re_init = true; |
11326 | |
11327 | if (flags != bp->flags) { |
11328 | u32 old_flags = bp->flags; |
11329 | |
11330 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
11331 | bp->flags = flags; |
11332 | if (update_tpa) |
11333 | bnxt_set_ring_params(bp); |
11334 | return rc; |
11335 | } |
11336 | |
11337 | if (re_init) { |
			bnxt_close_nic(bp, false, false);
11339 | bp->flags = flags; |
11340 | if (update_tpa) |
11341 | bnxt_set_ring_params(bp); |
11342 | |
			return bnxt_open_nic(bp, false, false);
11344 | } |
11345 | if (update_tpa) { |
11346 | bp->flags = flags; |
			rc = bnxt_set_tpa(bp,
					  (flags & BNXT_FLAG_TPA) ?
					  true : false);
11350 | if (rc) |
11351 | bp->flags = old_flags; |
11352 | } |
11353 | } |
11354 | return rc; |
11355 | } |
11356 | |
11357 | static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, |
11358 | u8 **nextp) |
11359 | { |
11360 | struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); |
11361 | struct hop_jumbo_hdr *jhdr; |
11362 | int hdr_count = 0; |
11363 | u8 *nexthdr; |
11364 | int start; |
11365 | |
11366 | /* Check that there are at most 2 IPv6 extension headers, no |
11367 | * fragment header, and each is <= 64 bytes. |
11368 | */ |
11369 | start = nw_off + sizeof(*ip6h); |
11370 | nexthdr = &ip6h->nexthdr; |
	while (ipv6_ext_hdr(*nexthdr)) {
11372 | struct ipv6_opt_hdr *hp; |
11373 | int hdrlen; |
11374 | |
11375 | if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || |
11376 | *nexthdr == NEXTHDR_FRAGMENT) |
11377 | return false; |
		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
					  skb_headlen(skb), NULL);
11380 | if (!hp) |
11381 | return false; |
11382 | if (*nexthdr == NEXTHDR_AUTH) |
11383 | hdrlen = ipv6_authlen(hp); |
11384 | else |
11385 | hdrlen = ipv6_optlen(hp); |
11386 | |
11387 | if (hdrlen > 64) |
11388 | return false; |
11389 | |
11390 | /* The ext header may be a hop-by-hop header inserted for |
11391 | * big TCP purposes. This will be removed before sending |
		 * from the NIC, so do not count it.
11393 | */ |
11394 | if (*nexthdr == NEXTHDR_HOP) { |
11395 | if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) |
11396 | goto increment_hdr; |
11397 | |
11398 | jhdr = (struct hop_jumbo_hdr *)hp; |
11399 | if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || |
11400 | jhdr->nexthdr != IPPROTO_TCP) |
11401 | goto increment_hdr; |
11402 | |
11403 | goto next_hdr; |
11404 | } |
11405 | increment_hdr: |
11406 | hdr_count++; |
11407 | next_hdr: |
11408 | nexthdr = &hp->nexthdr; |
11409 | start += hdrlen; |
11410 | } |
11411 | if (nextp) { |
11412 | /* Caller will check inner protocol */ |
11413 | if (skb->encapsulation) { |
11414 | *nextp = nexthdr; |
11415 | return true; |
11416 | } |
11417 | *nextp = NULL; |
11418 | } |
11419 | /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ |
11420 | return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; |
11421 | } |
11422 | |
11423 | /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ |
11424 | static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) |
11425 | { |
11426 | struct udphdr *uh = udp_hdr(skb); |
11427 | __be16 udp_port = uh->dest; |
11428 | |
11429 | if (udp_port != bp->vxlan_port && udp_port != bp->nge_port) |
11430 | return false; |
11431 | if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) { |
11432 | struct ethhdr *eh = inner_eth_hdr(skb); |
11433 | |
11434 | switch (eh->h_proto) { |
11435 | case htons(ETH_P_IP): |
11436 | return true; |
11437 | case htons(ETH_P_IPV6): |
return bnxt_exthdr_check(bp, skb,
skb_inner_network_offset(skb),
NULL);
11441 | } |
11442 | } |
11443 | return false; |
11444 | } |
11445 | |
11446 | static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) |
11447 | { |
11448 | switch (l4_proto) { |
11449 | case IPPROTO_UDP: |
11450 | return bnxt_udp_tunl_check(bp, skb); |
11451 | case IPPROTO_IPIP: |
11452 | return true; |
11453 | case IPPROTO_GRE: { |
11454 | switch (skb->inner_protocol) { |
11455 | default: |
11456 | return false; |
11457 | case htons(ETH_P_IP): |
11458 | return true; |
11459 | case htons(ETH_P_IPV6): |
11460 | fallthrough; |
11461 | } |
11462 | } |
11463 | case IPPROTO_IPV6: |
11464 | /* Check ext headers of inner ipv6 */ |
return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
NULL);
11467 | } |
11468 | return false; |
11469 | } |
11470 | |
11471 | static netdev_features_t bnxt_features_check(struct sk_buff *skb, |
11472 | struct net_device *dev, |
11473 | netdev_features_t features) |
11474 | { |
11475 | struct bnxt *bp = netdev_priv(dev); |
11476 | u8 *l4_proto; |
11477 | |
11478 | features = vlan_features_check(skb, features); |
11479 | switch (vlan_get_protocol(skb)) { |
11480 | case htons(ETH_P_IP): |
11481 | if (!skb->encapsulation) |
11482 | return features; |
11483 | l4_proto = &ip_hdr(skb)->protocol; |
if (bnxt_tunl_check(bp, skb, *l4_proto))
11485 | return features; |
11486 | break; |
11487 | case htons(ETH_P_IPV6): |
if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
&l4_proto))
11490 | break; |
if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11492 | return features; |
11493 | break; |
11494 | } |
11495 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
11496 | } |
11497 | |
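/* Read @num_words 32-bit words of chip register space starting at
 * @reg_off via the HWRM DBG_READ_DIRECT command.  The firmware DMAs the
 * register contents into a host buffer carved out of the request slice,
 * and the result is byte-swapped into @reg_buf.
 */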
11498 | int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, |
11499 | u32 *reg_buf) |
11500 | { |
11501 | struct hwrm_dbg_read_direct_output *resp; |
11502 | struct hwrm_dbg_read_direct_input *req; |
11503 | __le32 *dbg_reg_buf; |
11504 | dma_addr_t mapping; |
11505 | int rc, i; |
11506 | |
11507 | rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); |
11508 | if (rc) |
11509 | return rc; |
11510 | |
dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
&mapping);
11513 | if (!dbg_reg_buf) { |
11514 | rc = -ENOMEM; |
11515 | goto dbg_rd_reg_exit; |
11516 | } |
11517 | |
11518 | req->host_dest_addr = cpu_to_le64(mapping); |
11519 | |
11520 | resp = hwrm_req_hold(bp, req); |
11521 | req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); |
11522 | req->read_len32 = cpu_to_le32(num_words); |
11523 | |
11524 | rc = hwrm_req_send(bp, req); |
11525 | if (rc || resp->error_code) { |
11526 | rc = -EIO; |
11527 | goto dbg_rd_reg_exit; |
11528 | } |
11529 | for (i = 0; i < num_words; i++) |
11530 | reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); |
11531 | |
11532 | dbg_rd_reg_exit: |
11533 | hwrm_req_drop(bp, req); |
11534 | return rc; |
11535 | } |
11536 | |
11537 | static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, |
11538 | u32 ring_id, u32 *prod, u32 *cons) |
11539 | { |
11540 | struct hwrm_dbg_ring_info_get_output *resp; |
11541 | struct hwrm_dbg_ring_info_get_input *req; |
11542 | int rc; |
11543 | |
11544 | rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); |
11545 | if (rc) |
11546 | return rc; |
11547 | |
11548 | req->ring_type = ring_type; |
11549 | req->fw_ring_id = cpu_to_le32(ring_id); |
11550 | resp = hwrm_req_hold(bp, req); |
11551 | rc = hwrm_req_send(bp, req); |
11552 | if (!rc) { |
11553 | *prod = le32_to_cpu(resp->producer_index); |
11554 | *cons = le32_to_cpu(resp->consumer_index); |
11555 | } |
11556 | hwrm_req_drop(bp, req); |
11557 | return rc; |
11558 | } |
11559 | |
11560 | static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) |
11561 | { |
11562 | struct bnxt_tx_ring_info *txr = bnapi->tx_ring; |
11563 | int i = bnapi->index; |
11564 | |
11565 | if (!txr) |
11566 | return; |
11567 | |
netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11569 | i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, |
11570 | txr->tx_cons); |
11571 | } |
11572 | |
11573 | static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) |
11574 | { |
11575 | struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; |
11576 | int i = bnapi->index; |
11577 | |
11578 | if (!rxr) |
11579 | return; |
11580 | |
netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11582 | i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, |
11583 | rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, |
11584 | rxr->rx_sw_agg_prod); |
11585 | } |
11586 | |
11587 | static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) |
11588 | { |
11589 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
11590 | int i = bnapi->index; |
11591 | |
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11593 | i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); |
11594 | } |
11595 | |
11596 | static void bnxt_dbg_dump_states(struct bnxt *bp) |
11597 | { |
11598 | int i; |
11599 | struct bnxt_napi *bnapi; |
11600 | |
11601 | for (i = 0; i < bp->cp_nr_rings; i++) { |
11602 | bnapi = bp->bnapi[i]; |
11603 | if (netif_msg_drv(bp)) { |
11604 | bnxt_dump_tx_sw_state(bnapi); |
11605 | bnxt_dump_rx_sw_state(bnapi); |
11606 | bnxt_dump_cp_sw_state(bnapi); |
11607 | } |
11608 | } |
11609 | } |
11610 | |
11611 | static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) |
11612 | { |
11613 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; |
11614 | struct hwrm_ring_reset_input *req; |
11615 | struct bnxt_napi *bnapi = rxr->bnapi; |
11616 | struct bnxt_cp_ring_info *cpr; |
11617 | u16 cp_ring_id; |
11618 | int rc; |
11619 | |
11620 | rc = hwrm_req_init(bp, req, HWRM_RING_RESET); |
11621 | if (rc) |
11622 | return rc; |
11623 | |
11624 | cpr = &bnapi->cp_ring; |
11625 | cp_ring_id = cpr->cp_ring_struct.fw_ring_id; |
11626 | req->cmpl_ring = cpu_to_le16(cp_ring_id); |
11627 | req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; |
11628 | req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); |
11629 | return hwrm_req_send_silent(bp, req); |
11630 | } |
11631 | |
11632 | static void bnxt_reset_task(struct bnxt *bp, bool silent) |
11633 | { |
11634 | if (!silent) |
11635 | bnxt_dbg_dump_states(bp); |
if (netif_running(bp->dev)) {
11637 | int rc; |
11638 | |
11639 | if (silent) { |
bnxt_close_nic(bp, false, false);
bnxt_open_nic(bp, false, false);
} else {
bnxt_ulp_stop(bp);
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
bnxt_ulp_start(bp, rc);
11647 | } |
11648 | } |
11649 | } |
11650 | |
11651 | static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) |
11652 | { |
11653 | struct bnxt *bp = netdev_priv(dev); |
11654 | |
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11656 | bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); |
11657 | } |
11658 | |
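/* Firmware health polling, run from bnxt_timer() every tmr_multiplier
 * ticks.  A heartbeat register that stops advancing is counted as an
 * "arrest"; an unexpected change in the firmware reset counter is
 * counted as a "discovery".  Either condition queues
 * BNXT_FW_EXCEPTION_SP_EVENT to start error recovery.
 */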
11659 | static void bnxt_fw_health_check(struct bnxt *bp) |
11660 | { |
11661 | struct bnxt_fw_health *fw_health = bp->fw_health; |
11662 | struct pci_dev *pdev = bp->pdev; |
11663 | u32 val; |
11664 | |
11665 | if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
11666 | return; |
11667 | |
11668 | /* Make sure it is enabled before checking the tmr_counter. */ |
11669 | smp_rmb(); |
11670 | if (fw_health->tmr_counter) { |
11671 | fw_health->tmr_counter--; |
11672 | return; |
11673 | } |
11674 | |
11675 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
11676 | if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { |
11677 | fw_health->arrests++; |
11678 | goto fw_reset; |
11679 | } |
11680 | |
11681 | fw_health->last_fw_heartbeat = val; |
11682 | |
11683 | val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
11684 | if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { |
11685 | fw_health->discoveries++; |
11686 | goto fw_reset; |
11687 | } |
11688 | |
11689 | fw_health->tmr_counter = fw_health->tmr_multiplier; |
11690 | return; |
11691 | |
11692 | fw_reset: |
11693 | bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); |
11694 | } |
11695 | |
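/* Periodic housekeeping timer.  It runs in softirq context, so it only
 * queues slow-path events for bnxt_sp_task() and re-arms itself; no
 * blocking work is done here.
 */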
11696 | static void bnxt_timer(struct timer_list *t) |
11697 | { |
11698 | struct bnxt *bp = from_timer(bp, t, timer); |
11699 | struct net_device *dev = bp->dev; |
11700 | |
11701 | if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) |
11702 | return; |
11703 | |
if (atomic_read(&bp->intr_sem) != 0)
11705 | goto bnxt_restart_timer; |
11706 | |
11707 | if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) |
11708 | bnxt_fw_health_check(bp); |
11709 | |
11710 | if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) |
11711 | bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); |
11712 | |
11713 | if (bnxt_tc_flower_enabled(bp)) |
11714 | bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); |
11715 | |
11716 | #ifdef CONFIG_RFS_ACCEL |
11717 | if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) |
11718 | bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); |
11719 | #endif /*CONFIG_RFS_ACCEL*/ |
11720 | |
11721 | if (bp->link_info.phy_retry) { |
11722 | if (time_after(jiffies, bp->link_info.phy_retry_expires)) { |
11723 | bp->link_info.phy_retry = false; |
11724 | netdev_warn(dev: bp->dev, format: "failed to update phy settings after maximum retries.\n" ); |
11725 | } else { |
11726 | bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); |
11727 | } |
11728 | } |
11729 | |
11730 | if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) |
11731 | bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); |
11732 | |
11733 | if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && |
11734 | netif_carrier_ok(dev)) |
11735 | bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); |
11736 | |
11737 | bnxt_restart_timer: |
mod_timer(&bp->timer, jiffies + bp->current_interval);
11739 | } |
11740 | |
11741 | static void bnxt_rtnl_lock_sp(struct bnxt *bp) |
11742 | { |
11743 | /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK |
11744 | * set. If the device is being closed, bnxt_close() may be holding |
11745 | * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we |
11746 | * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). |
11747 | */ |
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11749 | rtnl_lock(); |
11750 | } |
11751 | |
11752 | static void bnxt_rtnl_unlock_sp(struct bnxt *bp) |
11753 | { |
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11755 | rtnl_unlock(); |
11756 | } |
11757 | |
11758 | /* Only called from bnxt_sp_task() */ |
11759 | static void bnxt_reset(struct bnxt *bp, bool silent) |
11760 | { |
11761 | bnxt_rtnl_lock_sp(bp); |
11762 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) |
11763 | bnxt_reset_task(bp, silent); |
11764 | bnxt_rtnl_unlock_sp(bp); |
11765 | } |
11766 | |
11767 | /* Only called from bnxt_sp_task() */ |
11768 | static void bnxt_rx_ring_reset(struct bnxt *bp) |
11769 | { |
11770 | int i; |
11771 | |
11772 | bnxt_rtnl_lock_sp(bp); |
11773 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
11774 | bnxt_rtnl_unlock_sp(bp); |
11775 | return; |
11776 | } |
11777 | /* Disable and flush TPA before resetting the RX ring */ |
11778 | if (bp->flags & BNXT_FLAG_TPA) |
bnxt_set_tpa(bp, false);
11780 | for (i = 0; i < bp->rx_nr_rings; i++) { |
11781 | struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; |
11782 | struct bnxt_cp_ring_info *cpr; |
11783 | int rc; |
11784 | |
11785 | if (!rxr->bnapi->in_reset) |
11786 | continue; |
11787 | |
rc = bnxt_hwrm_rx_ring_reset(bp, i);
if (rc) {
if (rc == -EINVAL || rc == -EOPNOTSUPP)
netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
else
netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
rc);
bnxt_reset_task(bp, true);
break;
}
bnxt_free_one_rx_ring_skbs(bp, i);
11799 | rxr->rx_prod = 0; |
11800 | rxr->rx_agg_prod = 0; |
11801 | rxr->rx_sw_agg_prod = 0; |
11802 | rxr->rx_next_cons = 0; |
11803 | rxr->bnapi->in_reset = false; |
bnxt_alloc_one_rx_ring(bp, i);
11805 | cpr = &rxr->bnapi->cp_ring; |
11806 | cpr->sw_stats.rx.rx_resets++; |
11807 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11810 | } |
11811 | if (bp->flags & BNXT_FLAG_TPA) |
bnxt_set_tpa(bp, true);
11813 | bnxt_rtnl_unlock_sp(bp); |
11814 | } |
11815 | |
11816 | static void bnxt_fw_reset_close(struct bnxt *bp) |
11817 | { |
11818 | bnxt_ulp_stop(bp); |
11819 | /* When firmware is in fatal state, quiesce device and disable |
11820 | * bus master to prevent any potential bad DMAs before freeing |
11821 | * kernel memory. |
11822 | */ |
11823 | if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { |
11824 | u16 val = 0; |
11825 | |
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11827 | if (val == 0xffff) |
11828 | bp->fw_reset_min_dsecs = 0; |
11829 | bnxt_tx_disable(bp); |
11830 | bnxt_disable_napi(bp); |
11831 | bnxt_disable_int_sync(bp); |
11832 | bnxt_free_irq(bp); |
11833 | bnxt_clear_int_mode(bp); |
pci_disable_device(bp->pdev);
11835 | } |
__bnxt_close_nic(bp, true, false);
11837 | bnxt_vf_reps_free(bp); |
11838 | bnxt_clear_int_mode(bp); |
11839 | bnxt_hwrm_func_drv_unrgtr(bp); |
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
11842 | bnxt_free_ctx_mem(bp); |
kfree(bp->ctx);
11844 | bp->ctx = NULL; |
11845 | } |
11846 | |
11847 | static bool is_bnxt_fw_ok(struct bnxt *bp) |
11848 | { |
11849 | struct bnxt_fw_health *fw_health = bp->fw_health; |
11850 | bool no_heartbeat = false, has_reset = false; |
11851 | u32 val; |
11852 | |
11853 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); |
11854 | if (val == fw_health->last_fw_heartbeat) |
11855 | no_heartbeat = true; |
11856 | |
11857 | val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
11858 | if (val != fw_health->last_fw_reset_cnt) |
11859 | has_reset = true; |
11860 | |
11861 | if (!no_heartbeat && has_reset) |
11862 | return true; |
11863 | |
11864 | return false; |
11865 | } |
11866 | |
11867 | /* rtnl_lock is acquired before calling this function */ |
11868 | static void bnxt_force_fw_reset(struct bnxt *bp) |
11869 | { |
11870 | struct bnxt_fw_health *fw_health = bp->fw_health; |
11871 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
11872 | u32 wait_dsecs; |
11873 | |
11874 | if (!test_bit(BNXT_STATE_OPEN, &bp->state) || |
11875 | test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) |
11876 | return; |
11877 | |
11878 | if (ptp) { |
spin_lock_bh(&ptp->ptp_lock);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
spin_unlock_bh(&ptp->ptp_lock);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11884 | } |
11885 | bnxt_fw_reset_close(bp); |
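/* The fw_health wait times below are in deciseconds; dsecs * HZ / 10
 * converts them to jiffies.
 */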
11886 | wait_dsecs = fw_health->master_func_wait_dsecs; |
11887 | if (fw_health->primary) { |
11888 | if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) |
11889 | wait_dsecs = 0; |
11890 | bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
11891 | } else { |
11892 | bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; |
11893 | wait_dsecs = fw_health->normal_func_wait_dsecs; |
11894 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
11895 | } |
11896 | |
11897 | bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; |
11898 | bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; |
bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11900 | } |
11901 | |
11902 | void bnxt_fw_exception(struct bnxt *bp) |
11903 | { |
11904 | netdev_warn(dev: bp->dev, format: "Detected firmware fatal condition, initiating reset\n" ); |
11905 | set_bit(BNXT_STATE_FW_FATAL_COND, addr: &bp->state); |
11906 | bnxt_rtnl_lock_sp(bp); |
11907 | bnxt_force_fw_reset(bp); |
11908 | bnxt_rtnl_unlock_sp(bp); |
11909 | } |
11910 | |
11911 | /* Returns the number of registered VFs, or 1 if VF configuration is pending, or |
11912 | * < 0 on error. |
11913 | */ |
11914 | static int bnxt_get_registered_vfs(struct bnxt *bp) |
11915 | { |
11916 | #ifdef CONFIG_BNXT_SRIOV |
11917 | int rc; |
11918 | |
11919 | if (!BNXT_PF(bp)) |
11920 | return 0; |
11921 | |
11922 | rc = bnxt_hwrm_func_qcfg(bp); |
11923 | if (rc) { |
11924 | netdev_err(dev: bp->dev, format: "func_qcfg cmd failed, rc = %d\n" , rc); |
11925 | return rc; |
11926 | } |
11927 | if (bp->pf.registered_vfs) |
11928 | return bp->pf.registered_vfs; |
11929 | if (bp->sriov_cfg) |
11930 | return 1; |
11931 | #endif |
11932 | return 0; |
11933 | } |
11934 | |
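/* Initiate a non-fatal firmware reset.  If VFs are still registered,
 * the state machine first polls in BNXT_FW_RESET_STATE_POLL_VF until
 * they unregister (or the wait times out) before closing the device and
 * handing off to bnxt_fw_reset_task().
 */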
11935 | void bnxt_fw_reset(struct bnxt *bp) |
11936 | { |
11937 | bnxt_rtnl_lock_sp(bp); |
11938 | if (test_bit(BNXT_STATE_OPEN, &bp->state) && |
11939 | !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
11940 | struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; |
11941 | int n = 0, tmo; |
11942 | |
11943 | if (ptp) { |
spin_lock_bh(&ptp->ptp_lock);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
spin_unlock_bh(&ptp->ptp_lock);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11949 | } |
11950 | if (bp->pf.active_vfs && |
11951 | !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
11952 | n = bnxt_get_registered_vfs(bp); |
11953 | if (n < 0) { |
11954 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, rc = %d\n" , |
11955 | n); |
11956 | clear_bit(BNXT_STATE_IN_FW_RESET, addr: &bp->state); |
11957 | dev_close(dev: bp->dev); |
11958 | goto fw_reset_exit; |
11959 | } else if (n > 0) { |
11960 | u16 vf_tmo_dsecs = n * 10; |
11961 | |
11962 | if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) |
11963 | bp->fw_reset_max_dsecs = vf_tmo_dsecs; |
11964 | bp->fw_reset_state = |
11965 | BNXT_FW_RESET_STATE_POLL_VF; |
11966 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
11967 | goto fw_reset_exit; |
11968 | } |
11969 | bnxt_fw_reset_close(bp); |
11970 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
11971 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
11972 | tmo = HZ / 10; |
11973 | } else { |
11974 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
11975 | tmo = bp->fw_reset_min_dsecs * HZ / 10; |
11976 | } |
bnxt_queue_fw_reset_work(bp, tmo);
11978 | } |
11979 | fw_reset_exit: |
11980 | bnxt_rtnl_unlock_sp(bp); |
11981 | } |
11982 | |
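/* P5 chips only: scan for completion rings that have work pending but
 * whose raw consumer index has not moved since the last check, which
 * suggests a missed notification.  The HWRM_DBG_RING_INFO_GET query is
 * purely diagnostic (its result is discarded here), and the event is
 * counted in the missed_irqs statistic.
 */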
11983 | static void bnxt_chk_missed_irq(struct bnxt *bp) |
11984 | { |
11985 | int i; |
11986 | |
11987 | if (!(bp->flags & BNXT_FLAG_CHIP_P5)) |
11988 | return; |
11989 | |
11990 | for (i = 0; i < bp->cp_nr_rings; i++) { |
11991 | struct bnxt_napi *bnapi = bp->bnapi[i]; |
11992 | struct bnxt_cp_ring_info *cpr; |
11993 | u32 fw_ring_id; |
11994 | int j; |
11995 | |
11996 | if (!bnapi) |
11997 | continue; |
11998 | |
11999 | cpr = &bnapi->cp_ring; |
12000 | for (j = 0; j < 2; j++) { |
12001 | struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; |
12002 | u32 val[2]; |
12003 | |
12004 | if (!cpr2 || cpr2->has_more_work || |
!bnxt_has_work(bp, cpr2))
12006 | continue; |
12007 | |
12008 | if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { |
12009 | cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; |
12010 | continue; |
12011 | } |
12012 | fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; |
12013 | bnxt_dbg_hwrm_ring_info_get(bp, |
12014 | DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, |
fw_ring_id, &val[0], &val[1]);
12016 | cpr->sw_stats.cmn.missed_irqs++; |
12017 | } |
12018 | } |
12019 | } |
12020 | |
12021 | static void bnxt_cfg_ntp_filters(struct bnxt *); |
12022 | |
12023 | static void bnxt_init_ethtool_link_settings(struct bnxt *bp) |
12024 | { |
12025 | struct bnxt_link_info *link_info = &bp->link_info; |
12026 | |
12027 | if (BNXT_AUTO_MODE(link_info->auto_mode)) { |
12028 | link_info->autoneg = BNXT_AUTONEG_SPEED; |
12029 | if (bp->hwrm_spec_code >= 0x10201) { |
12030 | if (link_info->auto_pause_setting & |
12031 | PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) |
12032 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
12033 | } else { |
12034 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
12035 | } |
12036 | bnxt_set_auto_speed(link_info); |
12037 | } else { |
12038 | bnxt_set_force_speed(link_info); |
12039 | link_info->req_duplex = link_info->duplex_setting; |
12040 | } |
12041 | if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) |
12042 | link_info->req_flow_ctrl = |
12043 | link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; |
12044 | else |
12045 | link_info->req_flow_ctrl = link_info->force_pause_setting; |
12046 | } |
12047 | |
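/* Answer a firmware echo request by sending the received data words
 * back via HWRM_FUNC_ECHO_RESPONSE, so the firmware can confirm that
 * the host driver is still responsive.
 */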
12048 | static void bnxt_fw_echo_reply(struct bnxt *bp) |
12049 | { |
12050 | struct bnxt_fw_health *fw_health = bp->fw_health; |
12051 | struct hwrm_func_echo_response_input *req; |
12052 | int rc; |
12053 | |
12054 | rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); |
12055 | if (rc) |
12056 | return; |
12057 | req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); |
12058 | req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); |
12059 | hwrm_req_send(bp, req); |
12060 | } |
12061 | |
12062 | static void bnxt_sp_task(struct work_struct *work) |
12063 | { |
12064 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); |
12065 | |
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
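/* Order the IN_SP_TASK store before the BNXT_STATE_OPEN test below;
 * this pairs with the close path, which clears BNXT_STATE_OPEN and then
 * waits for BNXT_STATE_IN_SP_TASK to drain (see bnxt_rtnl_lock_sp()).
 */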
12067 | smp_mb__after_atomic(); |
12068 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12070 | return; |
12071 | } |
12072 | |
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);

if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp, 0);
bnxt_hwrm_port_qstats_ext(bp, 0);
bnxt_accumulate_all_stats(bp);
}
12085 | |
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
int rc;

mutex_lock(&bp->link_lock);
if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);

rc = bnxt_update_link(bp, true);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);

if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
&bp->sp_event))
bnxt_init_ethtool_link_settings(bp);
mutex_unlock(&bp->link_lock);
12103 | } |
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;

mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
mutex_unlock(&bp->link_lock);
if (rc) {
netdev_warn(bp->dev, "update phy settings retry failed\n");
} else {
bp->link_info.phy_retry = false;
netdev_info(bp->dev, "update phy settings retry succeeded\n");
12115 | } |
12116 | } |
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
mutex_unlock(&bp->link_lock);
}

if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
bnxt_tc_flow_stats_work(bp);

if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);

if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
bnxt_fw_echo_reply(bp);

if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
bnxt_hwmon_notify_event(bp);
12134 | |
12135 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They |
12136 | * must be the last functions to be called before exiting. |
12137 | */ |
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);

if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);

if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
bnxt_rx_ring_reset(bp);

if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
bnxt_devlink_health_fw_report(bp);
else
bnxt_fw_reset(bp);
}

if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
bnxt_devlink_health_fw_report(bp);
}

smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12162 | } |
12163 | |
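/* Sanity-check a proposed ring configuration against hardware limits:
 * each traffic class needs its own set of TX rings, RFS (without P5)
 * needs one VNIC per RX ring, aggregation mode doubles the RX ring
 * count, and shared mode lets TX and RX share completion rings.
 */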
12164 | /* Under rtnl_lock */ |
12165 | int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, |
12166 | int tx_xdp) |
12167 | { |
12168 | int max_rx, max_tx, tx_sets = 1; |
12169 | int tx_rings_needed, stats; |
12170 | int rx_rings = rx; |
12171 | int cp, vnics, rc; |
12172 | |
12173 | if (tcs) |
12174 | tx_sets = tcs; |
12175 | |
12176 | rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); |
12177 | if (rc) |
12178 | return rc; |
12179 | |
12180 | if (max_rx < rx) |
12181 | return -ENOMEM; |
12182 | |
12183 | tx_rings_needed = tx * tx_sets + tx_xdp; |
12184 | if (max_tx < tx_rings_needed) |
12185 | return -ENOMEM; |
12186 | |
12187 | vnics = 1; |
12188 | if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) |
12189 | vnics += rx_rings; |
12190 | |
12191 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
12192 | rx_rings <<= 1; |
12193 | cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; |
12194 | stats = cp; |
12195 | if (BNXT_NEW_RM(bp)) { |
12196 | cp += bnxt_get_ulp_msix_num(bp); |
12197 | stats += bnxt_get_ulp_stat_ctxs(bp); |
12198 | } |
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
stats, vnics);
12201 | } |
12202 | |
12203 | static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) |
12204 | { |
12205 | if (bp->bar2) { |
pci_iounmap(pdev, bp->bar2);
12207 | bp->bar2 = NULL; |
12208 | } |
12209 | |
12210 | if (bp->bar1) { |
pci_iounmap(pdev, bp->bar1);
12212 | bp->bar1 = NULL; |
12213 | } |
12214 | |
12215 | if (bp->bar0) { |
pci_iounmap(pdev, bp->bar0);
12217 | bp->bar0 = NULL; |
12218 | } |
12219 | } |
12220 | |
12221 | static void bnxt_cleanup_pci(struct bnxt *bp) |
12222 | { |
bnxt_unmap_bars(bp, bp->pdev);
pci_release_regions(bp->pdev);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
12227 | } |
12228 | |
12229 | static void bnxt_init_dflt_coal(struct bnxt *bp) |
12230 | { |
12231 | struct bnxt_coal_cap *coal_cap = &bp->coal_cap; |
12232 | struct bnxt_coal *coal; |
12233 | u16 flags = 0; |
12234 | |
12235 | if (coal_cap->cmpl_params & |
12236 | RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) |
12237 | flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; |
12238 | |
/* Tick values in microseconds.
12240 | * 1 coal_buf x bufs_per_record = 1 completion record. |
12241 | */ |
12242 | coal = &bp->rx_coal; |
12243 | coal->coal_ticks = 10; |
12244 | coal->coal_bufs = 30; |
12245 | coal->coal_ticks_irq = 1; |
12246 | coal->coal_bufs_irq = 2; |
12247 | coal->idle_thresh = 50; |
12248 | coal->bufs_per_record = 2; |
12249 | coal->budget = 64; /* NAPI budget */ |
12250 | coal->flags = flags; |
12251 | |
12252 | coal = &bp->tx_coal; |
12253 | coal->coal_ticks = 28; |
12254 | coal->coal_bufs = 30; |
12255 | coal->coal_ticks_irq = 2; |
12256 | coal->coal_bufs_irq = 2; |
12257 | coal->bufs_per_record = 1; |
12258 | coal->flags = flags; |
12259 | |
12260 | bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; |
12261 | } |
12262 | |
12263 | /* FW that pre-reserves 1 VNIC per function */ |
12264 | static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) |
12265 | { |
12266 | u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); |
12267 | |
12268 | if (!(bp->flags & BNXT_FLAG_CHIP_P5) && |
12269 | (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) |
12270 | return true; |
12271 | if ((bp->flags & BNXT_FLAG_CHIP_P5) && |
12272 | (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) |
12273 | return true; |
12274 | return false; |
12275 | } |
12276 | |
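/* Phase 1 of firmware init: establish HWRM communication (retrying
 * through firmware recovery if the version query fails), read the NVM
 * config version, reset the function, and sync the firmware clock.
 */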
12277 | static int bnxt_fw_init_one_p1(struct bnxt *bp) |
12278 | { |
12279 | int rc; |
12280 | |
12281 | bp->fw_cap = 0; |
12282 | rc = bnxt_hwrm_ver_get(bp); |
12283 | bnxt_try_map_fw_health_reg(bp); |
12284 | if (rc) { |
12285 | rc = bnxt_try_recover_fw(bp); |
12286 | if (rc) |
12287 | return rc; |
12288 | rc = bnxt_hwrm_ver_get(bp); |
12289 | if (rc) |
12290 | return rc; |
12291 | } |
12292 | |
12293 | bnxt_nvm_cfg_ver_get(bp); |
12294 | |
12295 | rc = bnxt_hwrm_func_reset(bp); |
12296 | if (rc) |
12297 | return -ENODEV; |
12298 | |
12299 | bnxt_hwrm_fw_set_time(bp); |
12300 | return 0; |
12301 | } |
12302 | |
12303 | static int bnxt_fw_init_one_p2(struct bnxt *bp) |
12304 | { |
12305 | int rc; |
12306 | |
12307 | /* Get the MAX capabilities for this function */ |
12308 | rc = bnxt_hwrm_func_qcaps(bp); |
12309 | if (rc) { |
12310 | netdev_err(dev: bp->dev, format: "hwrm query capability failure rc: %x\n" , |
12311 | rc); |
12312 | return -ENODEV; |
12313 | } |
12314 | |
12315 | rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); |
12316 | if (rc) |
12317 | netdev_warn(dev: bp->dev, format: "hwrm query adv flow mgnt failure rc: %d\n" , |
12318 | rc); |
12319 | |
12320 | if (bnxt_alloc_fw_health(bp)) { |
12321 | netdev_warn(dev: bp->dev, format: "no memory for firmware error recovery\n" ); |
12322 | } else { |
12323 | rc = bnxt_hwrm_error_recovery_qcfg(bp); |
12324 | if (rc) |
12325 | netdev_warn(dev: bp->dev, format: "hwrm query error recovery failure rc: %d\n" , |
12326 | rc); |
12327 | } |
12328 | |
rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12330 | if (rc) |
12331 | return -ENODEV; |
12332 | |
12333 | if (bnxt_fw_pre_resv_vnics(bp)) |
12334 | bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; |
12335 | |
12336 | bnxt_hwrm_func_qcfg(bp); |
12337 | bnxt_hwrm_vnic_qcaps(bp); |
12338 | bnxt_hwrm_port_led_qcaps(bp); |
12339 | bnxt_ethtool_init(bp); |
12340 | if (bp->fw_cap & BNXT_FW_CAP_PTP) |
12341 | __bnxt_hwrm_ptp_qcfg(bp); |
12342 | bnxt_dcb_init(bp); |
12343 | bnxt_hwmon_init(bp); |
12344 | return 0; |
12345 | } |
12346 | |
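/* Set the default RSS hash types: IPv4 and IPv6, plus TCP over each.
 * UDP hashing is enabled only on P4+ chips with HWRM spec 0x10501 or
 * newer, where the capability is known to exist.
 */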
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12348 | { |
12349 | bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; |
12350 | bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | |
12351 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | |
12352 | VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | |
12353 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; |
12354 | if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) |
12355 | bp->rss_hash_delta = bp->rss_hash_cfg; |
12356 | if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { |
12357 | bp->flags |= BNXT_FLAG_UDP_RSS_CAP; |
12358 | bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | |
12359 | VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; |
12360 | } |
12361 | } |
12362 | |
12363 | static void bnxt_set_dflt_rfs(struct bnxt *bp) |
12364 | { |
12365 | struct net_device *dev = bp->dev; |
12366 | |
12367 | dev->hw_features &= ~NETIF_F_NTUPLE; |
12368 | dev->features &= ~NETIF_F_NTUPLE; |
12369 | bp->flags &= ~BNXT_FLAG_RFS; |
12370 | if (bnxt_rfs_supported(bp)) { |
12371 | dev->hw_features |= NETIF_F_NTUPLE; |
12372 | if (bnxt_rfs_capable(bp)) { |
12373 | bp->flags |= BNXT_FLAG_RFS; |
12374 | dev->features |= NETIF_F_NTUPLE; |
12375 | } |
12376 | } |
12377 | } |
12378 | |
12379 | static void bnxt_fw_init_one_p3(struct bnxt *bp) |
12380 | { |
12381 | struct pci_dev *pdev = bp->pdev; |
12382 | |
12383 | bnxt_set_dflt_rss_hash_type(bp); |
12384 | bnxt_set_dflt_rfs(bp); |
12385 | |
12386 | bnxt_get_wol_settings(bp); |
12387 | if (bp->flags & BNXT_FLAG_WOL_CAP) |
device_set_wakeup_enable(&pdev->dev, bp->wol);
else
device_set_wakeup_capable(&pdev->dev, false);
12391 | |
12392 | bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); |
12393 | bnxt_hwrm_coal_params_qcaps(bp); |
12394 | } |
12395 | |
12396 | static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); |
12397 | |
12398 | int bnxt_fw_init_one(struct bnxt *bp) |
12399 | { |
12400 | int rc; |
12401 | |
12402 | rc = bnxt_fw_init_one_p1(bp); |
12403 | if (rc) { |
12404 | netdev_err(dev: bp->dev, format: "Firmware init phase 1 failed\n" ); |
12405 | return rc; |
12406 | } |
12407 | rc = bnxt_fw_init_one_p2(bp); |
12408 | if (rc) { |
12409 | netdev_err(dev: bp->dev, format: "Firmware init phase 2 failed\n" ); |
12410 | return rc; |
12411 | } |
rc = bnxt_probe_phy(bp, false);
12413 | if (rc) |
12414 | return rc; |
12415 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); |
12416 | if (rc) |
12417 | return rc; |
12418 | |
12419 | bnxt_fw_init_one_p3(bp); |
12420 | return 0; |
12421 | } |
12422 | |
12423 | static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) |
12424 | { |
12425 | struct bnxt_fw_health *fw_health = bp->fw_health; |
12426 | u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; |
12427 | u32 val = fw_health->fw_reset_seq_vals[reg_idx]; |
12428 | u32 reg_type, reg_off, delay_msecs; |
12429 | |
12430 | delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; |
12431 | reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); |
12432 | reg_off = BNXT_FW_HEALTH_REG_OFF(reg); |
12433 | switch (reg_type) { |
12434 | case BNXT_FW_HEALTH_REG_TYPE_CFG: |
pci_write_config_dword(bp->pdev, reg_off, val);
12436 | break; |
12437 | case BNXT_FW_HEALTH_REG_TYPE_GRC: |
writel(reg_off & BNXT_GRC_BASE_MASK,
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12440 | reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; |
12441 | fallthrough; |
12442 | case BNXT_FW_HEALTH_REG_TYPE_BAR0: |
writel(val, bp->bar0 + reg_off);
12444 | break; |
12445 | case BNXT_FW_HEALTH_REG_TYPE_BAR1: |
writel(val, bp->bar1 + reg_off);
12447 | break; |
12448 | } |
12449 | if (delay_msecs) { |
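/* The config space read here presumably serves to flush the posted
 * register write before the delay.
 */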
pci_read_config_dword(bp->pdev, 0, &val);
msleep(delay_msecs);
12452 | } |
12453 | } |
12454 | |
12455 | bool bnxt_hwrm_reset_permitted(struct bnxt *bp) |
12456 | { |
12457 | struct hwrm_func_qcfg_output *resp; |
12458 | struct hwrm_func_qcfg_input *req; |
12459 | bool result = true; /* firmware will enforce if unknown */ |
12460 | |
12461 | if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) |
12462 | return result; |
12463 | |
12464 | if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) |
12465 | return result; |
12466 | |
12467 | req->fid = cpu_to_le16(0xffff); |
12468 | resp = hwrm_req_hold(bp, req); |
12469 | if (!hwrm_req_send(bp, req)) |
12470 | result = !!(le16_to_cpu(resp->flags) & |
12471 | FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); |
12472 | hwrm_req_drop(bp, req); |
12473 | return result; |
12474 | } |
12475 | |
12476 | static void bnxt_reset_all(struct bnxt *bp) |
12477 | { |
12478 | struct bnxt_fw_health *fw_health = bp->fw_health; |
12479 | int i, rc; |
12480 | |
12481 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
12482 | bnxt_fw_reset_via_optee(bp); |
12483 | bp->fw_reset_timestamp = jiffies; |
12484 | return; |
12485 | } |
12486 | |
12487 | if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { |
12488 | for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) |
bnxt_fw_reset_writel(bp, i);
12490 | } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { |
12491 | struct hwrm_fw_reset_input *req; |
12492 | |
12493 | rc = hwrm_req_init(bp, req, HWRM_FW_RESET); |
12494 | if (!rc) { |
12495 | req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); |
12496 | req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; |
12497 | req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; |
12498 | req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; |
12499 | rc = hwrm_req_send(bp, req); |
12500 | } |
12501 | if (rc != -ENODEV) |
12502 | netdev_warn(dev: bp->dev, format: "Unable to reset FW rc=%d\n" , rc); |
12503 | } |
12504 | bp->fw_reset_timestamp = jiffies; |
12505 | } |
12506 | |
12507 | static bool bnxt_fw_reset_timeout(struct bnxt *bp) |
12508 | { |
12509 | return time_after(jiffies, bp->fw_reset_timestamp + |
12510 | (bp->fw_reset_max_dsecs * HZ / 10)); |
12511 | } |
12512 | |
12513 | static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) |
12514 | { |
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12516 | if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { |
bnxt_ulp_start(bp, rc);
bnxt_dl_health_fw_status_update(bp, false);
12519 | } |
12520 | bp->fw_reset_state = 0; |
dev_close(bp->dev);
12522 | } |
12523 | |
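/* Firmware reset state machine, run from a delayed work item.  The
 * states advance roughly as: POLL_VF (wait for VFs to unregister) ->
 * close -> POLL_FW_DOWN or RESET_FW -> ENABLE_DEV (re-enable PCI) ->
 * POLL_FW (wait for HWRM to respond) -> OPENING (reopen under rtnl).
 * Each state either re-queues the work or falls through to the next.
 */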
12524 | static void bnxt_fw_reset_task(struct work_struct *work) |
12525 | { |
12526 | struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); |
12527 | int rc = 0; |
12528 | |
12529 | if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { |
12530 | netdev_err(dev: bp->dev, format: "bnxt_fw_reset_task() called when not in fw reset mode!\n" ); |
12531 | return; |
12532 | } |
12533 | |
12534 | switch (bp->fw_reset_state) { |
12535 | case BNXT_FW_RESET_STATE_POLL_VF: { |
12536 | int n = bnxt_get_registered_vfs(bp); |
12537 | int tmo; |
12538 | |
12539 | if (n < 0) { |
12540 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n" , |
12541 | n, jiffies_to_msecs(j: jiffies - |
12542 | bp->fw_reset_timestamp)); |
12543 | goto fw_reset_abort; |
12544 | } else if (n > 0) { |
12545 | if (bnxt_fw_reset_timeout(bp)) { |
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12547 | bp->fw_reset_state = 0; |
12548 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n" , |
12549 | n); |
12550 | return; |
12551 | } |
12552 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
12553 | return; |
12554 | } |
12555 | bp->fw_reset_timestamp = jiffies; |
12556 | rtnl_lock(); |
12557 | if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
12558 | bnxt_fw_reset_abort(bp, rc); |
12559 | rtnl_unlock(); |
12560 | return; |
12561 | } |
12562 | bnxt_fw_reset_close(bp); |
12563 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
12564 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
12565 | tmo = HZ / 10; |
12566 | } else { |
12567 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
12568 | tmo = bp->fw_reset_min_dsecs * HZ / 10; |
12569 | } |
12570 | rtnl_unlock(); |
bnxt_queue_fw_reset_work(bp, tmo);
12572 | return; |
12573 | } |
12574 | case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { |
12575 | u32 val; |
12576 | |
12577 | val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
12578 | if (!(val & BNXT_FW_STATUS_SHUTDOWN) && |
12579 | !bnxt_fw_reset_timeout(bp)) { |
12580 | bnxt_queue_fw_reset_work(bp, HZ / 5); |
12581 | return; |
12582 | } |
12583 | |
12584 | if (!bp->fw_health->primary) { |
12585 | u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; |
12586 | |
12587 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12589 | return; |
12590 | } |
12591 | bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; |
12592 | } |
12593 | fallthrough; |
12594 | case BNXT_FW_RESET_STATE_RESET_FW: |
12595 | bnxt_reset_all(bp); |
12596 | bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; |
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12598 | return; |
12599 | case BNXT_FW_RESET_STATE_ENABLE_DEV: |
12600 | bnxt_inv_fw_health_reg(bp); |
12601 | if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && |
12602 | !bp->fw_reset_min_dsecs) { |
12603 | u16 val; |
12604 | |
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12606 | if (val == 0xffff) { |
12607 | if (bnxt_fw_reset_timeout(bp)) { |
12608 | netdev_err(dev: bp->dev, format: "Firmware reset aborted, PCI config space invalid\n" ); |
12609 | rc = -ETIMEDOUT; |
12610 | goto fw_reset_abort; |
12611 | } |
12612 | bnxt_queue_fw_reset_work(bp, HZ / 1000); |
12613 | return; |
12614 | } |
12615 | } |
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12619 | !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) |
12620 | bnxt_dl_remote_reload(bp); |
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12623 | rc = -ENODEV; |
12624 | goto fw_reset_abort; |
12625 | } |
pci_set_master(bp->pdev);
12627 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; |
12628 | fallthrough; |
12629 | case BNXT_FW_RESET_STATE_POLL_FW: |
12630 | bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; |
12631 | rc = bnxt_hwrm_poll(bp); |
12632 | if (rc) { |
12633 | if (bnxt_fw_reset_timeout(bp)) { |
12634 | netdev_err(dev: bp->dev, format: "Firmware reset aborted\n" ); |
12635 | goto fw_reset_abort_status; |
12636 | } |
12637 | bnxt_queue_fw_reset_work(bp, HZ / 5); |
12638 | return; |
12639 | } |
12640 | bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; |
12641 | bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; |
12642 | fallthrough; |
12643 | case BNXT_FW_RESET_STATE_OPENING: |
12644 | while (!rtnl_trylock()) { |
12645 | bnxt_queue_fw_reset_work(bp, HZ / 10); |
12646 | return; |
12647 | } |
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12651 | bnxt_fw_reset_abort(bp, rc); |
12652 | rtnl_unlock(); |
12653 | return; |
12654 | } |
12655 | |
12656 | if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && |
12657 | bp->fw_health->enabled) { |
12658 | bp->fw_health->last_fw_reset_cnt = |
12659 | bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); |
12660 | } |
12661 | bp->fw_reset_state = 0; |
12662 | /* Make sure fw_reset_state is 0 before clearing the flag */ |
12663 | smp_mb__before_atomic(); |
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, 0);
12666 | bnxt_reenable_sriov(bp); |
12667 | bnxt_vf_reps_alloc(bp); |
12668 | bnxt_vf_reps_open(bp); |
12669 | bnxt_ptp_reapply_pps(bp); |
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
12674 | } |
12675 | rtnl_unlock(); |
12676 | break; |
12677 | } |
12678 | return; |
12679 | |
12680 | fw_reset_abort_status: |
12681 | if (bp->fw_health->status_reliable || |
12682 | (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { |
12683 | u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); |
12684 | |
12685 | netdev_err(dev: bp->dev, format: "fw_health_status 0x%x\n" , sts); |
12686 | } |
12687 | fw_reset_abort: |
12688 | rtnl_lock(); |
12689 | bnxt_fw_reset_abort(bp, rc); |
12690 | rtnl_unlock(); |
12691 | } |
12692 | |
12693 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) |
12694 | { |
12695 | int rc; |
12696 | struct bnxt *bp = netdev_priv(dev); |
12697 | |
12698 | SET_NETDEV_DEV(dev, &pdev->dev); |
12699 | |
12700 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
rc = pci_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12704 | goto init_err; |
12705 | } |
12706 | |
12707 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
12708 | dev_err(&pdev->dev, |
12709 | "Cannot find PCI device base address, aborting\n" ); |
12710 | rc = -ENODEV; |
12711 | goto init_err_disable; |
12712 | } |
12713 | |
12714 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); |
12715 | if (rc) { |
12716 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n" ); |
12717 | goto init_err_disable; |
12718 | } |
12719 | |
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12723 | rc = -EIO; |
12724 | goto init_err_release; |
12725 | } |
12726 | |
pci_set_master(pdev);
12728 | |
12729 | bp->dev = dev; |
12730 | bp->pdev = pdev; |
12731 | |
12732 | /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() |
12733 | * determines the BAR size. |
12734 | */ |
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12738 | rc = -ENOMEM; |
12739 | goto init_err_release; |
12740 | } |
12741 | |
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12745 | rc = -ENOMEM; |
12746 | goto init_err_release; |
12747 | } |
12748 | |
12749 | INIT_WORK(&bp->sp_task, bnxt_sp_task); |
12750 | INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); |
12751 | |
12752 | spin_lock_init(&bp->ntp_fltr_lock); |
12753 | #if BITS_PER_LONG == 32 |
12754 | spin_lock_init(&bp->db_lock); |
12755 | #endif |
12756 | |
12757 | bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; |
12758 | bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; |
12759 | |
12760 | timer_setup(&bp->timer, bnxt_timer, 0); |
12761 | bp->current_interval = BNXT_TIMER_INTERVAL; |
12762 | |
12763 | bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; |
12764 | bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; |
12765 | |
clear_bit(BNXT_STATE_OPEN, &bp->state);
12767 | return 0; |
12768 | |
12769 | init_err_release: |
12770 | bnxt_unmap_bars(bp, pdev); |
12771 | pci_release_regions(pdev); |
12772 | |
12773 | init_err_disable: |
pci_disable_device(pdev);
12775 | |
12776 | init_err: |
12777 | return rc; |
12778 | } |
12779 | |
12780 | /* rtnl_lock held */ |
12781 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) |
12782 | { |
12783 | struct sockaddr *addr = p; |
12784 | struct bnxt *bp = netdev_priv(dev); |
12785 | int rc = 0; |
12786 | |
if (!is_valid_ether_addr(addr->sa_data))
12788 | return -EADDRNOTAVAIL; |
12789 | |
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12791 | return 0; |
12792 | |
12793 | rc = bnxt_approve_mac(bp, addr->sa_data, true); |
12794 | if (rc) |
12795 | return rc; |
12796 | |
eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
12801 | } |
12802 | |
12803 | return rc; |
12804 | } |
12805 | |
12806 | /* rtnl_lock held */ |
12807 | static int bnxt_change_mtu(struct net_device *dev, int new_mtu) |
12808 | { |
12809 | struct bnxt *bp = netdev_priv(dev); |
12810 | |
12811 | if (netif_running(dev)) |
bnxt_close_nic(bp, true, false);
12813 | |
12814 | dev->mtu = new_mtu; |
12815 | bnxt_set_ring_params(bp); |
12816 | |
12817 | if (netif_running(dev)) |
return bnxt_open_nic(bp, true, false);
12819 | |
12820 | return 0; |
12821 | } |
12822 | |
12823 | int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) |
12824 | { |
12825 | struct bnxt *bp = netdev_priv(dev); |
12826 | bool sh = false; |
12827 | int rc; |
12828 | |
12829 | if (tc > bp->max_tc) { |
12830 | netdev_err(dev, format: "Too many traffic classes requested: %d. Max supported is %d.\n" , |
12831 | tc, bp->max_tc); |
12832 | return -EINVAL; |
12833 | } |
12834 | |
12835 | if (netdev_get_num_tc(dev) == tc) |
12836 | return 0; |
12837 | |
12838 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) |
12839 | sh = true; |
12840 | |
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
sh, tc, bp->tx_nr_rings_xdp);
12843 | if (rc) |
12844 | return rc; |
12845 | |
12846 | /* Needs to close the device and do hw resource re-allocations */ |
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, false);
12849 | |
12850 | if (tc) { |
12851 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; |
netdev_set_num_tc(dev, tc);
12853 | } else { |
12854 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
12855 | netdev_reset_tc(dev); |
12856 | } |
12857 | bp->tx_nr_rings += bp->tx_nr_rings_xdp; |
12858 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
12859 | bp->tx_nr_rings + bp->rx_nr_rings; |
12860 | |
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
12863 | |
12864 | return 0; |
12865 | } |
12866 | |
12867 | static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
12868 | void *cb_priv) |
12869 | { |
12870 | struct bnxt *bp = cb_priv; |
12871 | |
12872 | if (!bnxt_tc_flower_enabled(bp) || |
!tc_cls_can_offload_and_chain0(bp->dev, type_data))
12874 | return -EOPNOTSUPP; |
12875 | |
12876 | switch (type) { |
12877 | case TC_SETUP_CLSFLOWER: |
return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12879 | default: |
12880 | return -EOPNOTSUPP; |
12881 | } |
12882 | } |
12883 | |
12884 | LIST_HEAD(bnxt_block_cb_list); |
12885 | |
12886 | static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, |
12887 | void *type_data) |
12888 | { |
12889 | struct bnxt *bp = netdev_priv(dev); |
12890 | |
12891 | switch (type) { |
12892 | case TC_SETUP_BLOCK: |
return flow_block_cb_setup_simple(type_data,
&bnxt_block_cb_list,
bnxt_setup_tc_block_cb,
bp, bp, true);
12897 | case TC_SETUP_QDISC_MQPRIO: { |
12898 | struct tc_mqprio_qopt *mqprio = type_data; |
12899 | |
12900 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
12901 | |
return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12903 | } |
12904 | default: |
12905 | return -EOPNOTSUPP; |
12906 | } |
12907 | } |
12908 | |
12909 | #ifdef CONFIG_RFS_ACCEL |
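/* Accelerated RFS support.  bnxt_rx_flow_steer() dissects the flow,
 * de-dups it against the ntp_fltr hash table, allocates a software ID
 * from the filter bitmap, and defers the actual HWRM filter programming
 * to bnxt_cfg_ntp_filters() in the sp_task.  Expired flows are freed
 * the same way.
 */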
12910 | static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, |
12911 | struct bnxt_ntuple_filter *f2) |
12912 | { |
12913 | struct flow_keys *keys1 = &f1->fkeys; |
12914 | struct flow_keys *keys2 = &f2->fkeys; |
12915 | |
12916 | if (keys1->basic.n_proto != keys2->basic.n_proto || |
12917 | keys1->basic.ip_proto != keys2->basic.ip_proto) |
12918 | return false; |
12919 | |
12920 | if (keys1->basic.n_proto == htons(ETH_P_IP)) { |
12921 | if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || |
12922 | keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) |
12923 | return false; |
12924 | } else { |
if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
sizeof(keys1->addrs.v6addrs.src)) ||
memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
sizeof(keys1->addrs.v6addrs.dst)))
12929 | return false; |
12930 | } |
12931 | |
12932 | if (keys1->ports.ports == keys2->ports.ports && |
12933 | keys1->control.flags == keys2->control.flags && |
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12936 | return true; |
12937 | |
12938 | return false; |
12939 | } |
12940 | |
12941 | static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, |
12942 | u16 rxq_index, u32 flow_id) |
12943 | { |
12944 | struct bnxt *bp = netdev_priv(dev); |
12945 | struct bnxt_ntuple_filter *fltr, *new_fltr; |
12946 | struct flow_keys *fkeys; |
12947 | struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); |
12948 | int rc = 0, idx, bit_id, l2_idx = 0; |
12949 | struct hlist_head *head; |
12950 | u32 flags; |
12951 | |
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12953 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
12954 | int off = 0, j; |
12955 | |
12956 | netif_addr_lock_bh(dev); |
12957 | for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { |
if (ether_addr_equal(eth->h_dest,
vnic->uc_list + off)) {
12960 | l2_idx = j + 1; |
12961 | break; |
12962 | } |
12963 | } |
12964 | netif_addr_unlock_bh(dev); |
12965 | if (!l2_idx) |
12966 | return -EINVAL; |
12967 | } |
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12969 | if (!new_fltr) |
12970 | return -ENOMEM; |
12971 | |
12972 | fkeys = &new_fltr->fkeys; |
if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12974 | rc = -EPROTONOSUPPORT; |
12975 | goto err_free; |
12976 | } |
12977 | |
12978 | if ((fkeys->basic.n_proto != htons(ETH_P_IP) && |
12979 | fkeys->basic.n_proto != htons(ETH_P_IPV6)) || |
12980 | ((fkeys->basic.ip_proto != IPPROTO_TCP) && |
12981 | (fkeys->basic.ip_proto != IPPROTO_UDP))) { |
12982 | rc = -EPROTONOSUPPORT; |
12983 | goto err_free; |
12984 | } |
12985 | if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && |
12986 | bp->hwrm_spec_code < 0x10601) { |
12987 | rc = -EPROTONOSUPPORT; |
12988 | goto err_free; |
12989 | } |
12990 | flags = fkeys->control.flags; |
12991 | if (((flags & FLOW_DIS_ENCAPSULATION) && |
12992 | bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { |
12993 | rc = -EPROTONOSUPPORT; |
12994 | goto err_free; |
12995 | } |
12996 | |
12997 | memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); |
12998 | memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); |
12999 | |
13000 | idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; |
13001 | head = &bp->ntp_fltr_hash_tbl[idx]; |
13002 | rcu_read_lock(); |
13003 | hlist_for_each_entry_rcu(fltr, head, hash) { |
if (bnxt_fltr_match(fltr, new_fltr)) {
13005 | rc = fltr->sw_id; |
13006 | rcu_read_unlock(); |
13007 | goto err_free; |
13008 | } |
13009 | } |
13010 | rcu_read_unlock(); |
13011 | |
spin_lock_bh(&bp->ntp_fltr_lock);
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
BNXT_NTP_FLTR_MAX_FLTR, 0);
13015 | if (bit_id < 0) { |
spin_unlock_bh(&bp->ntp_fltr_lock);
13017 | rc = -ENOMEM; |
13018 | goto err_free; |
13019 | } |
13020 | |
13021 | new_fltr->sw_id = (u16)bit_id; |
13022 | new_fltr->flow_id = flow_id; |
13023 | new_fltr->l2_fltr_idx = l2_idx; |
13024 | new_fltr->rxq = rxq_index; |
13025 | hlist_add_head_rcu(n: &new_fltr->hash, h: head); |
13026 | bp->ntp_fltr_count++; |
13027 | spin_unlock_bh(lock: &bp->ntp_fltr_lock); |
13028 | |
13029 | bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); |
13030 | |
13031 | return new_fltr->sw_id; |
13032 | |
13033 | err_free: |
13034 | kfree(objp: new_fltr); |
13035 | return rc; |
13036 | } |
13037 | |
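/* ntuple filter housekeeping, run from the driver's sp_task worker:
 * filters not yet valid are programmed into hardware via HWRM, and
 * valid filters whose flows have expired (per rps_may_expire_flow())
 * are freed in hardware and removed from the hash table.
 */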
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!\n");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
				    unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}

static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
				      unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}

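/* UDP tunnel port offload: one hardware slot each for a VXLAN and a
 * GENEVE destination port.  The core only invokes these callbacks while
 * the device is open (UDP_TUNNEL_NIC_INFO_OPEN_ONLY) and may sleep in
 * them (UDP_TUNNEL_NIC_INFO_MAY_SLEEP).
 */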
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.set_port	= bnxt_udp_tunnel_set_port,
	.unset_port	= bnxt_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

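/* netdev callbacks wired into the core networking stack.  The SR-IOV VF
 * management hooks and the aRFS flow-steering hook are compiled in only
 * when CONFIG_BNXT_SRIOV / CONFIG_RFS_ACCEL are enabled.
 */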
static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_eth_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_features_check	= bnxt_features_check,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_bpf		= bnxt_xdp,
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
};

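/* PCI .remove handler: tears down in roughly the reverse order of
 * bnxt_init_one() - disable SR-IOV, unregister the netdev, flush
 * deferred work, then release firmware, interrupt, and memory
 * resources before freeing the netdev itself.
 */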
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	bnxt_rdma_aux_device_uninit(bp);

	bnxt_ptp_clear(bp);
	unregister_netdev(dev);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	bp->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
	else
		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
	if (!fw_dflt)
		return 0;

	mutex_lock(&bp->link_lock);
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	mutex_unlock(&bp->link_lock);
	return 0;
}

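/* The MSI-X Table Size field in the Message Control register is encoded
 * as N - 1, hence the + 1 below.  Without an MSI-X capability, a single
 * (INTx/MSI) vector is assumed.
 */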
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, the max_cp output parameter is the number of
		 * available NQs.
		 */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
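/* For example (illustrative numbers only): with 8 RX rings and 4 TX
 * rings per TC, the trim below yields cp = min(4, 8) = 4 shared
 * completion rings, paired as 4 RX + 4 TX rings.
 */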
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc && rc != -ENODEV)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc && rc != -ENODEV)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_set_dflt_rfs(bp);

init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

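/* A PF uses the MAC address provisioned by firmware.  A VF prefers the
 * administratively assigned MAC; when none is set, a random MAC is
 * generated and submitted for (possibly non-strict) approval by the
 * PF/firmware.
 */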
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(bp->dev, vf->mac_addr);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	unsigned int vpd_size, kw_len;
	int pos, size;
	u8 *vpd_data;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
		return;
	}

	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (pos < 0)
		goto read_sn;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
	if (pos < 0)
		goto exit;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}

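/* Read the 64-bit Device Serial Number from the PCIe DSN extended
 * capability.  It is stored little-endian and later used as the
 * switchdev eswitch switch_id (see bnxt_get_port_parent_id()).
 */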
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

void bnxt_print_device_info(struct bnxt *bp)
{
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	pcie_print_link_status(bp->pdev);
}

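/* PCI .probe handler.  The broad sequence is: allocate the netdev and
 * map BARs, bring up the HWRM channel and query firmware/PHY/resource
 * capabilities, set up netdev features and default rings, initialize
 * interrupts, devlink, and TC offload, and finally register the netdev.
 * Error paths unwind in reverse order.
 */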
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions left over from the crash
	 * kernel while loading the driver in the capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bp->board_idx = ent->driver_data;
	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(bp->board_idx))
		bp->flags |= BNXT_FLAG_VF;

	/* No devlink port registration in case of a VF */
	if (BNXT_PF(bp))
		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	if (BNXT_CHIP_P5(bp)) {
		bp->flags |= BNXT_FLAG_CHIP_P5;
		if (BNXT_CHIP_SR2(bp))
			bp->flags |= BNXT_FLAG_CHIP_SR2;
	}

	rc = bnxt_alloc_rss_indir_tbl(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;

	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);

	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_rdma_aux_device_init(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);

	return 0;
init_err_cleanup:
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

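/* System suspend/resume.  The NIC may lose power across suspend, so the
 * driver unregisters from firmware and frees firmware context memory on
 * suspend, then re-establishes the HWRM channel, resets the function,
 * and reopens the device on resume.
 */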
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

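/* PCIe Advanced Error Reporting (AER) recovery.  The PCI core drives
 * the sequence: .error_detected quiesces the device and requests a slot
 * reset, .slot_reset re-enables the device and waits for firmware to
 * recover, and .resume reopens the interface once recovery is complete.
 */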
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write the BARs if the
		 * value is the same as the value saved earlier, the driver
		 * needs to write the BARs to 0 to force a restore after a
		 * fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);