// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
#include "netdev.h"

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header, plus a bit extra, is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
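
/*
 * Worked example for the default above (illustrative arithmetic only):
 * 10304 bytes / 12.5 GB/s = 824.32 ns, truncated to 824 ns; adding the
 * 16 ns coalescing-interrupt allowance gives an initial timeout of 840 ns.
 */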

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag; /* the flag */
	char *str; /* description string */
	u16 extra; /* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
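
/*
 * Illustrative sketch of how the flag tables below are consumed: walk a
 * table and report each description whose bit is set in a raw status value.
 * The helper name and the pr_info() reporting are assumptions for the
 * example only, not the driver's real decode path.
 */
static inline void example_report_flags(const struct flag_table *table,
					size_t len, u64 reg)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (reg & table[i].flag)
			pr_info("%s\n", table[i].str); /* one line per set bit */
}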

/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1

/*
 * RSM instance allocation
 *   0 - User Fecn Handling
 *   1 - Vnic
 *   2 - AIP
 *   3 - Verbs
 */
#define RSM_INS_FECN 0
#define RSM_INS_VNIC 1
#define RSM_INS_AIP 2
#define RSM_INS_VERBS 3

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
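
/*
 * Example (illustrative value): an irev of 0x0503 decodes as emulation
 * rev 5 (0x0503 >> 8) with the parallel variant (low nibble 3), so
 * is_emulator_p(dd) is true and is_emulator_s(dd) is false.
 */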

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
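
/*
 * Worked example of the encoding above (illustrative): a match/select
 * offset packs the quadword index above the bit offset as
 * (qw << QW_SHIFT) | bit.  Thus LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48,
 * LRH_SC_SELECT_OFFSET = (0 << 6) | 60 = 60, and
 * QPN_SELECT_OFFSET = (1 << 6) | 1 = 65.
 */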

/* RSM fields for AIP */
/* LRH.BTH above is reused for this rule */

/* BTH.DESTQP: QW 1, OFFSET 16 for match */
#define BTH_DESTQP_QW 1ull
#define BTH_DESTQP_BIT_OFFSET 16ull
#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
#define BTH_DESTQP_MASK 0xFFull
#define BTH_DESTQP_VALUE 0x81ull
/* DETH.SQPN: QW 1 Offset 56 for select */
/* We use the 8 most significant Source QPN bits as entropy for AIP */
#define DETH_AIP_SQPN_QW 3ull
#define DETH_AIP_SQPN_BIT_OFFSET 56ull
#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
#define DETH_AIP_SQPN_SELECT_OFFSET \
	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
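
/*
 * Example use (illustrative values only): an identity SC-to-VL mapping for
 * the first eight SCs could be expressed as
 *
 *	SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 *
 * which ORs each VL value into the shift position named by its SC within
 * the SEND_SC2VLT0 register.
 */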

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
	CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
	CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
	CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
	CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
	CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
	CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
	CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
	CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
	CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
	CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
	CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
	CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
	CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
	CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
	CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
	CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
	CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
	CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
	CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
	CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
	CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
	CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
	CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
	CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
	CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
	CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
	CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
	CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
	CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
	CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
	CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
	CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
	CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
	CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
	CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
	CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
	CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
	CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
	CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
	CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
	CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
	SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
	SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
	SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
	SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
	SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
	SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
	SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
	SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
	SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
	SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
	SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
	SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
	SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
	SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
	SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
	SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
	SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
	SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
	SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
	SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
	SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
	SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
	SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
	SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
	SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
	SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
	SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
	SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
	SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
	SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
	SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
	SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
	SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
	SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
	SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
	SEC_PACKET_DROPPED | SEC_SC_HALTED,
	SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
	SEC_PACKET_DROPPED | SEC_SC_HALTED,
	SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
	SEC_WRITE_DROPPED | SEC_SC_HALTED,
	SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
	SEC_WRITE_DROPPED | SEC_SC_HALTED,
	SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
	SEC_WRITE_DROPPED | SEC_SC_HALTED,
	SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
	RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
	RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
	RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
	RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
	RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
	RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
	RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
	RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
	RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
	RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
	RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
	RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
	LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
	LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
	LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
	LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
	LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
	LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status; /* status CSR offset */
	u32 clear; /* clear CSR offset */
	u32 mask; /* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
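
/*
 * Illustrative sketch of the "clear down" pattern this table drives; the
 * helper itself is an assumption for the example (the driver's real
 * routine differs), while read_csr()/write_csr() are the chip CSR
 * accessors.
 */
static inline void example_clear_down(struct hfi1_devdata *dd,
				      const struct err_reg_info *eri,
				      u32 source)
{
	u64 reg = read_csr(dd, eri->status); /* second-tier status bits */

	write_csr(dd, eri->clear, reg);	/* acknowledge what was seen */
	if (eri->handler)
		eri->handler(dd, source, reg); /* decode and report */
}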

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
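
/*
 * Example expansion (for illustration): EE(CCE_ERR, handle_cce_err, "CceErr")
 * yields { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err,
 * "CceErr" }; the DC_EE* variants substitute the DC's _FLG/_FLG_CLR/_FLG_EN
 * and _FLG/_CLR/_EN register-name suffixes instead.
 */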
1095 | |
1096 | /* |
1097 | * Table of the "misc" grouping of error interrupts. Each entry refers to |
1098 | * another register containing more information. |
1099 | */ |
1100 | static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = { |
1101 | /* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr" ), |
1102 | /* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr" ), |
1103 | /* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr" ), |
1104 | /* 3*/ { 0, 0, 0, NULL }, /* reserved */ |
1105 | /* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr" ), |
1106 | /* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr" ), |
1107 | /* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr" ), |
1108 | /* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr" ) |
1109 | /* the rest are reserved */ |
1110 | }; |
1111 | |
1112 | /* |
1113 | * Index into the Various section of the interrupt sources |
1114 | * corresponding to the Critical Temperature interrupt. |
1115 | */ |
1116 | #define TCRIT_INT_SOURCE 4 |
1117 | |
1118 | /* |
1119 | * SDMA error interrupt entry - refers to another register containing more |
1120 | * information. |
1121 | */ |
1122 | static const struct err_reg_info sdma_eng_err = |
1123 | EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr" ); |
1124 | |
1125 | static const struct err_reg_info various_err[NUM_VARIOUS] = { |
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1130 | /* 4*/ { 0, 0, 0, NULL }, /* TCritInt */ |
1131 | /* rest are reserved */ |
1132 | }; |
1133 | |
1134 | /* |
1135 | * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG |
 * register cannot be derived from the MTU value because 10K is not
1137 | * a power of 2. Therefore, we need a constant. Everything else can |
1138 | * be calculated. |
1139 | */ |
1140 | #define DCC_CFG_PORT_MTU_CAP_10240 7 |
1141 | |
1142 | /* |
1143 | * Table of the DC grouping of error interrupts. Each entry refers to |
1144 | * another register containing more information. |
1145 | */ |
1146 | static const struct err_reg_info dc_errs[NUM_DC_ERRS] = { |
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1150 | /* 3*/ /* dc_lbm_int - special, see is_dc_int() */ |
1151 | /* the rest are reserved */ |
1152 | }; |
1153 | |
1154 | struct cntr_entry { |
1155 | /* |
1156 | * counter name |
1157 | */ |
1158 | char *name; |
1159 | |
1160 | /* |
1161 | * csr to read for name (if applicable) |
1162 | */ |
1163 | u64 csr; |
1164 | |
1165 | /* |
1166 | * offset into dd or ppd to store the counter's value |
1167 | */ |
1168 | int offset; |
1169 | |
1170 | /* |
1171 | * flags |
1172 | */ |
1173 | u8 flags; |
1174 | |
1175 | /* |
1176 | * accessor for stat element, context either dd or ppd |
1177 | */ |
1178 | u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl, |
1179 | int mode, u64 data); |
1180 | }; |
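
/*
 * A counter is accessed through entry->rw_cntr(entry, ctx, vl, mode, data),
 * where ctx is a struct hfi1_devdata * for device counters or a
 * struct hfi1_pportdata * for port counters, vl is CNTR_INVALID_VL for
 * counters that are not per-VL, and mode is CNTR_MODE_R (data ignored)
 * or CNTR_MODE_W.
 */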
1181 | |
1182 | #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0 |
1183 | #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159 |
1184 | |
1185 | #define CNTR_ELEM(name, csr, offset, flags, accessor) \ |
1186 | { \ |
1187 | name, \ |
1188 | csr, \ |
1189 | offset, \ |
1190 | flags, \ |
1191 | accessor \ |
1192 | } |
1193 | |
1194 | /* 32bit RXE */ |
1195 | #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \ |
1196 | CNTR_ELEM(#name, \ |
1197 | (counter * 8 + RCV_COUNTER_ARRAY32), \ |
1198 | 0, flags | CNTR_32BIT, \ |
1199 | port_access_u32_csr) |
1200 | |
1201 | #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \ |
1202 | CNTR_ELEM(#name, \ |
1203 | (counter * 8 + RCV_COUNTER_ARRAY32), \ |
1204 | 0, flags | CNTR_32BIT, \ |
1205 | dev_access_u32_csr) |
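
/*
 * Expansion sketch with a made-up counter name:
 *   RXE32_DEV_CNTR_ELEM(RxFoo, 3, CNTR_NORMAL)
 * becomes
 *   { "RxFoo", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *     CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 * Counter CSRs sit 8 bytes apart in the array; CNTR_32BIT marks the
 * hardware counter as only 32 bits wide.
 */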
1206 | |
1207 | /* 64bit RXE */ |
1208 | #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \ |
1209 | CNTR_ELEM(#name, \ |
1210 | (counter * 8 + RCV_COUNTER_ARRAY64), \ |
1211 | 0, flags, \ |
1212 | port_access_u64_csr) |
1213 | |
1214 | #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \ |
1215 | CNTR_ELEM(#name, \ |
1216 | (counter * 8 + RCV_COUNTER_ARRAY64), \ |
1217 | 0, flags, \ |
1218 | dev_access_u64_csr) |
1219 | |
1220 | #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx |
1221 | #define OVR_ELM(ctx) \ |
1222 | CNTR_ELEM("RcvHdrOvr" #ctx, \ |
1223 | (RCV_HDR_OVFL_CNT + ctx * 0x100), \ |
1224 | 0, CNTR_NORMAL, port_access_u64_csr) |
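
/*
 * For example, OVR_ELM(5) defines the "RcvHdrOvr5" entry, read from
 * RCV_HDR_OVFL_CNT + 0x500: each receive context's overflow counter
 * CSR is 0x100 bytes after the previous one.
 */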
1225 | |
1226 | /* 32bit TXE */ |
1227 | #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \ |
1228 | CNTR_ELEM(#name, \ |
1229 | (counter * 8 + SEND_COUNTER_ARRAY32), \ |
1230 | 0, flags | CNTR_32BIT, \ |
1231 | port_access_u32_csr) |
1232 | |
1233 | /* 64bit TXE */ |
1234 | #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \ |
1235 | CNTR_ELEM(#name, \ |
1236 | (counter * 8 + SEND_COUNTER_ARRAY64), \ |
1237 | 0, flags, \ |
1238 | port_access_u64_csr) |
1239 | |
#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)
1246 | |
1247 | /* CCE */ |
1248 | #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \ |
1249 | CNTR_ELEM(#name, \ |
1250 | (counter * 8 + CCE_COUNTER_ARRAY32), \ |
1251 | 0, flags | CNTR_32BIT, \ |
1252 | dev_access_u32_csr) |
1253 | |
1254 | #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \ |
1255 | CNTR_ELEM(#name, \ |
1256 | (counter * 8 + CCE_INT_COUNTER_ARRAY32), \ |
1257 | 0, flags | CNTR_32BIT, \ |
1258 | dev_access_u32_csr) |
1259 | |
1260 | /* DC */ |
1261 | #define DC_PERF_CNTR(name, counter, flags) \ |
1262 | CNTR_ELEM(#name, \ |
1263 | counter, \ |
1264 | 0, \ |
1265 | flags, \ |
1266 | dev_access_u64_csr) |
1267 | |
1268 | #define DC_PERF_CNTR_LCB(name, counter, flags) \ |
1269 | CNTR_ELEM(#name, \ |
1270 | counter, \ |
1271 | 0, \ |
1272 | flags, \ |
1273 | dc_access_lcb_cntr) |
1274 | |
1275 | /* ibp counters */ |
1276 | #define SW_IBP_CNTR(name, cntr) \ |
1277 | CNTR_ELEM(#name, \ |
1278 | 0, \ |
1279 | 0, \ |
1280 | CNTR_SYNTH, \ |
1281 | access_ibp_##cntr) |
1282 | |
1283 | /** |
1284 | * hfi1_addr_from_offset - return addr for readq/writeq |
1285 | * @dd: the dd device |
1286 | * @offset: the offset of the CSR within bar0 |
1287 | * |
1288 | * This routine selects the appropriate base address |
1289 | * based on the indicated offset. |
1290 | */ |
1291 | static inline void __iomem *hfi1_addr_from_offset( |
1292 | const struct hfi1_devdata *dd, |
1293 | u32 offset) |
1294 | { |
1295 | if (offset >= dd->base2_start) |
1296 | return dd->kregbase2 + (offset - dd->base2_start); |
1297 | return dd->kregbase1 + offset; |
1298 | } |
1299 | |
1300 | /** |
1301 | * read_csr - read CSR at the indicated offset |
1302 | * @dd: the dd device |
1303 | * @offset: the offset of the CSR within bar0 |
1304 | * |
 * Return: the value read or all FFs if there
1306 | * is no mapping |
1307 | */ |
1308 | u64 read_csr(const struct hfi1_devdata *dd, u32 offset) |
1309 | { |
1310 | if (dd->flags & HFI1_PRESENT) |
		return readq(hfi1_addr_from_offset(dd, offset));
1312 | return -1; |
1313 | } |
1314 | |
1315 | /** |
1316 | * write_csr - write CSR at the indicated offset |
1317 | * @dd: the dd device |
1318 | * @offset: the offset of the CSR within bar0 |
1319 | * @value: value to write |
1320 | */ |
1321 | void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value) |
1322 | { |
1323 | if (dd->flags & HFI1_PRESENT) { |
1324 | void __iomem *base = hfi1_addr_from_offset(dd, offset); |
1325 | |
1326 | /* avoid write to RcvArray */ |
1327 | if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) |
1328 | return; |
		writeq(value, base);
1330 | } |
1331 | } |
1332 | |
1333 | /** |
 * get_csr_addr - return the iomem address for offset
1335 | * @dd: the dd device |
1336 | * @offset: the offset of the CSR within bar0 |
1337 | * |
1338 | * Return: The iomem address to use in subsequent |
1339 | * writeq/readq operations. |
1340 | */ |
1341 | void __iomem *get_csr_addr( |
1342 | const struct hfi1_devdata *dd, |
1343 | u32 offset) |
1344 | { |
1345 | if (dd->flags & HFI1_PRESENT) |
1346 | return hfi1_addr_from_offset(dd, offset); |
1347 | return NULL; |
1348 | } |
1349 | |
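/*
 * Read or write a CSR-backed counter: CNTR_MODE_R returns the current
 * CSR contents, CNTR_MODE_W writes @value and returns it.
 */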
1350 | static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, |
1351 | int mode, u64 value) |
1352 | { |
1353 | u64 ret; |
1354 | |
1355 | if (mode == CNTR_MODE_R) { |
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1366 | return ret; |
1367 | } |
1368 | |
1369 | /* Dev Access */ |
1370 | static u64 dev_access_u32_csr(const struct cntr_entry *entry, |
1371 | void *context, int vl, int mode, u64 data) |
1372 | { |
1373 | struct hfi1_devdata *dd = context; |
1374 | u64 csr = entry->csr; |
1375 | |
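	/*
	 * CNTR_SDMA counters are kept per engine; vl carries the engine
	 * index and each engine's counter CSR is 0x100 bytes apart.
	 */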
1376 | if (entry->flags & CNTR_SDMA) { |
1377 | if (vl == CNTR_INVALID_VL) |
1378 | return 0; |
1379 | csr += 0x100 * vl; |
1380 | } else { |
1381 | if (vl != CNTR_INVALID_VL) |
1382 | return 0; |
1383 | } |
	return read_write_csr(dd, csr, mode, data);
1385 | } |
1386 | |
1387 | static u64 access_sde_err_cnt(const struct cntr_entry *entry, |
1388 | void *context, int idx, int mode, u64 data) |
1389 | { |
1390 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1391 | |
1392 | if (dd->per_sdma && idx < dd->num_sdma) |
1393 | return dd->per_sdma[idx].err_cnt; |
1394 | return 0; |
1395 | } |
1396 | |
1397 | static u64 access_sde_int_cnt(const struct cntr_entry *entry, |
1398 | void *context, int idx, int mode, u64 data) |
1399 | { |
1400 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1401 | |
1402 | if (dd->per_sdma && idx < dd->num_sdma) |
1403 | return dd->per_sdma[idx].sdma_int_cnt; |
1404 | return 0; |
1405 | } |
1406 | |
1407 | static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry, |
1408 | void *context, int idx, int mode, u64 data) |
1409 | { |
1410 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1411 | |
1412 | if (dd->per_sdma && idx < dd->num_sdma) |
1413 | return dd->per_sdma[idx].idle_int_cnt; |
1414 | return 0; |
1415 | } |
1416 | |
1417 | static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry, |
1418 | void *context, int idx, int mode, |
1419 | u64 data) |
1420 | { |
1421 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1422 | |
1423 | if (dd->per_sdma && idx < dd->num_sdma) |
1424 | return dd->per_sdma[idx].progress_int_cnt; |
1425 | return 0; |
1426 | } |
1427 | |
1428 | static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context, |
1429 | int vl, int mode, u64 data) |
1430 | { |
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;
1435 | |
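	/* per-VL counters are consecutive 64-bit CSRs, hence the 8-byte stride */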
1436 | if (entry->flags & CNTR_VL) { |
1437 | if (vl == CNTR_INVALID_VL) |
1438 | return 0; |
1439 | csr += 8 * vl; |
1440 | } else { |
1441 | if (vl != CNTR_INVALID_VL) |
1442 | return 0; |
1443 | } |
1444 | |
	val = read_write_csr(dd, csr, mode, data);
1446 | return val; |
1447 | } |
1448 | |
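/*
 * LCB counters cannot be read with a plain CSR access; read_lcb_csr()/
 * write_lcb_csr() may fail if the LCB cannot be acquired, in which case
 * the counter reads as 0.
 */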
1449 | static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context, |
1450 | int vl, int mode, u64 data) |
1451 | { |
1452 | struct hfi1_devdata *dd = context; |
1453 | u32 csr = entry->csr; |
1454 | int ret = 0; |
1455 | |
1456 | if (vl != CNTR_INVALID_VL) |
1457 | return 0; |
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		if (!(dd->flags & HFI1_SHUTDOWN))
			dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1470 | return data; |
1471 | } |
1472 | |
1473 | /* Port Access */ |
1474 | static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context, |
1475 | int vl, int mode, u64 data) |
1476 | { |
1477 | struct hfi1_pportdata *ppd = context; |
1478 | |
1479 | if (vl != CNTR_INVALID_VL) |
1480 | return 0; |
	return read_write_csr(ppd->dd, entry->csr, mode, data);
1482 | } |
1483 | |
1484 | static u64 port_access_u64_csr(const struct cntr_entry *entry, |
1485 | void *context, int vl, int mode, u64 data) |
1486 | { |
1487 | struct hfi1_pportdata *ppd = context; |
1488 | u64 val; |
1489 | u64 csr = entry->csr; |
1490 | |
1491 | if (entry->flags & CNTR_VL) { |
1492 | if (vl == CNTR_INVALID_VL) |
1493 | return 0; |
1494 | csr += 8 * vl; |
1495 | } else { |
1496 | if (vl != CNTR_INVALID_VL) |
1497 | return 0; |
1498 | } |
	val = read_write_csr(ppd->dd, csr, mode, data);
1500 | return val; |
1501 | } |
1502 | |
1503 | /* Software defined */ |
1504 | static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, |
1505 | u64 data) |
1506 | { |
1507 | u64 ret; |
1508 | |
1509 | if (mode == CNTR_MODE_R) { |
1510 | ret = *cntr; |
1511 | } else if (mode == CNTR_MODE_W) { |
1512 | *cntr = data; |
1513 | ret = data; |
1514 | } else { |
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1520 | |
1521 | return ret; |
1522 | } |
1523 | |
1524 | static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context, |
1525 | int vl, int mode, u64 data) |
1526 | { |
1527 | struct hfi1_pportdata *ppd = context; |
1528 | |
1529 | if (vl != CNTR_INVALID_VL) |
1530 | return 0; |
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1532 | } |
1533 | |
1534 | static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context, |
1535 | int vl, int mode, u64 data) |
1536 | { |
1537 | struct hfi1_pportdata *ppd = context; |
1538 | |
1539 | if (vl != CNTR_INVALID_VL) |
1540 | return 0; |
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1542 | } |
1543 | |
1544 | static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry, |
1545 | void *context, int vl, int mode, |
1546 | u64 data) |
1547 | { |
1548 | struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; |
1549 | |
1550 | if (vl != CNTR_INVALID_VL) |
1551 | return 0; |
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1553 | } |
1554 | |
1555 | static u64 access_sw_xmit_discards(const struct cntr_entry *entry, |
1556 | void *context, int vl, int mode, u64 data) |
1557 | { |
1558 | struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; |
1559 | u64 zero = 0; |
1560 | u64 *counter; |
1561 | |
1562 | if (vl == CNTR_INVALID_VL) |
1563 | counter = &ppd->port_xmit_discards; |
1564 | else if (vl >= 0 && vl < C_VL_COUNT) |
1565 | counter = &ppd->port_xmit_discards_vl[vl]; |
1566 | else |
1567 | counter = &zero; |
1568 | |
	return read_write_sw(ppd->dd, counter, mode, data);
1570 | } |
1571 | |
1572 | static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, |
1573 | void *context, int vl, int mode, |
1574 | u64 data) |
1575 | { |
1576 | struct hfi1_pportdata *ppd = context; |
1577 | |
1578 | if (vl != CNTR_INVALID_VL) |
1579 | return 0; |
1580 | |
	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
1583 | } |
1584 | |
1585 | static u64 access_rcv_constraint_errs(const struct cntr_entry *entry, |
1586 | void *context, int vl, int mode, u64 data) |
1587 | { |
1588 | struct hfi1_pportdata *ppd = context; |
1589 | |
1590 | if (vl != CNTR_INVALID_VL) |
1591 | return 0; |
1592 | |
	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
1595 | } |
1596 | |
1597 | u64 get_all_cpu_total(u64 __percpu *cntr) |
1598 | { |
1599 | int cpu; |
1600 | u64 counter = 0; |
1601 | |
1602 | for_each_possible_cpu(cpu) |
1603 | counter += *per_cpu_ptr(cntr, cpu); |
1604 | return counter; |
1605 | } |
1606 | |
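/*
 * Read or "zero" a per-CPU counter.  The per-CPU totals are never reset;
 * zeroing is emulated by latching the current total in @z_val, after
 * which reads return the total minus that baseline.
 */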
1607 | static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, |
1608 | u64 __percpu *cntr, |
1609 | int vl, int mode, u64 data) |
1610 | { |
1611 | u64 ret = 0; |
1612 | |
1613 | if (vl != CNTR_INVALID_VL) |
1614 | return 0; |
1615 | |
1616 | if (mode == CNTR_MODE_R) { |
1617 | ret = get_all_cpu_total(cntr) - *z_val; |
1618 | } else if (mode == CNTR_MODE_W) { |
1619 | /* A write can only zero the counter */ |
1620 | if (data == 0) |
1621 | *z_val = get_all_cpu_total(cntr); |
1622 | else |
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1626 | return 0; |
1627 | } |
1628 | |
1629 | return ret; |
1630 | } |
1631 | |
1632 | static u64 access_sw_cpu_intr(const struct cntr_entry *entry, |
1633 | void *context, int vl, int mode, u64 data) |
1634 | { |
1635 | struct hfi1_devdata *dd = context; |
1636 | |
	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1638 | mode, data); |
1639 | } |
1640 | |
1641 | static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry, |
1642 | void *context, int vl, int mode, u64 data) |
1643 | { |
1644 | struct hfi1_devdata *dd = context; |
1645 | |
	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1647 | mode, data); |
1648 | } |
1649 | |
1650 | static u64 access_sw_pio_wait(const struct cntr_entry *entry, |
1651 | void *context, int vl, int mode, u64 data) |
1652 | { |
1653 | struct hfi1_devdata *dd = context; |
1654 | |
1655 | return dd->verbs_dev.n_piowait; |
1656 | } |
1657 | |
1658 | static u64 access_sw_pio_drain(const struct cntr_entry *entry, |
1659 | void *context, int vl, int mode, u64 data) |
1660 | { |
1661 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1662 | |
1663 | return dd->verbs_dev.n_piodrain; |
1664 | } |
1665 | |
1666 | static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry, |
1667 | void *context, int vl, int mode, u64 data) |
1668 | { |
1669 | struct hfi1_devdata *dd = context; |
1670 | |
1671 | return dd->ctx0_seq_drop; |
1672 | } |
1673 | |
1674 | static u64 access_sw_vtx_wait(const struct cntr_entry *entry, |
1675 | void *context, int vl, int mode, u64 data) |
1676 | { |
1677 | struct hfi1_devdata *dd = context; |
1678 | |
1679 | return dd->verbs_dev.n_txwait; |
1680 | } |
1681 | |
1682 | static u64 access_sw_kmem_wait(const struct cntr_entry *entry, |
1683 | void *context, int vl, int mode, u64 data) |
1684 | { |
1685 | struct hfi1_devdata *dd = context; |
1686 | |
1687 | return dd->verbs_dev.n_kmem_wait; |
1688 | } |
1689 | |
1690 | static u64 access_sw_send_schedule(const struct cntr_entry *entry, |
1691 | void *context, int vl, int mode, u64 data) |
1692 | { |
1693 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1694 | |
	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1696 | mode, data); |
1697 | } |
1698 | |
1699 | /* Software counters for the error status bits within MISC_ERR_STATUS */ |
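/*
 * The array index used by each accessor below is the bit position of the
 * corresponding error within the register; the CceErrStatus, RcvErrStatus
 * and SendPioErrStatus counter groups that follow use the same convention.
 */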
1700 | static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry, |
1701 | void *context, int vl, int mode, |
1702 | u64 data) |
1703 | { |
1704 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1705 | |
1706 | return dd->misc_err_status_cnt[12]; |
1707 | } |
1708 | |
1709 | static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry, |
1710 | void *context, int vl, int mode, |
1711 | u64 data) |
1712 | { |
1713 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1714 | |
1715 | return dd->misc_err_status_cnt[11]; |
1716 | } |
1717 | |
1718 | static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry, |
1719 | void *context, int vl, int mode, |
1720 | u64 data) |
1721 | { |
1722 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1723 | |
1724 | return dd->misc_err_status_cnt[10]; |
1725 | } |
1726 | |
1727 | static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry, |
1728 | void *context, int vl, |
1729 | int mode, u64 data) |
1730 | { |
1731 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1732 | |
1733 | return dd->misc_err_status_cnt[9]; |
1734 | } |
1735 | |
1736 | static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry, |
1737 | void *context, int vl, int mode, |
1738 | u64 data) |
1739 | { |
1740 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1741 | |
1742 | return dd->misc_err_status_cnt[8]; |
1743 | } |
1744 | |
1745 | static u64 access_misc_efuse_read_bad_addr_err_cnt( |
1746 | const struct cntr_entry *entry, |
1747 | void *context, int vl, int mode, u64 data) |
1748 | { |
1749 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1750 | |
1751 | return dd->misc_err_status_cnt[7]; |
1752 | } |
1753 | |
1754 | static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry, |
1755 | void *context, int vl, |
1756 | int mode, u64 data) |
1757 | { |
1758 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1759 | |
1760 | return dd->misc_err_status_cnt[6]; |
1761 | } |
1762 | |
1763 | static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry, |
1764 | void *context, int vl, int mode, |
1765 | u64 data) |
1766 | { |
1767 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1768 | |
1769 | return dd->misc_err_status_cnt[5]; |
1770 | } |
1771 | |
1772 | static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry, |
1773 | void *context, int vl, int mode, |
1774 | u64 data) |
1775 | { |
1776 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1777 | |
1778 | return dd->misc_err_status_cnt[4]; |
1779 | } |
1780 | |
1781 | static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry, |
1782 | void *context, int vl, |
1783 | int mode, u64 data) |
1784 | { |
1785 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1786 | |
1787 | return dd->misc_err_status_cnt[3]; |
1788 | } |
1789 | |
1790 | static u64 access_misc_csr_write_bad_addr_err_cnt( |
1791 | const struct cntr_entry *entry, |
1792 | void *context, int vl, int mode, u64 data) |
1793 | { |
1794 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1795 | |
1796 | return dd->misc_err_status_cnt[2]; |
1797 | } |
1798 | |
1799 | static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, |
1800 | void *context, int vl, |
1801 | int mode, u64 data) |
1802 | { |
1803 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1804 | |
1805 | return dd->misc_err_status_cnt[1]; |
1806 | } |
1807 | |
1808 | static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry, |
1809 | void *context, int vl, int mode, |
1810 | u64 data) |
1811 | { |
1812 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1813 | |
1814 | return dd->misc_err_status_cnt[0]; |
1815 | } |
1816 | |
1817 | /* |
1818 | * Software counter for the aggregate of |
1819 | * individual CceErrStatus counters |
1820 | */ |
1821 | static u64 access_sw_cce_err_status_aggregated_cnt( |
1822 | const struct cntr_entry *entry, |
1823 | void *context, int vl, int mode, u64 data) |
1824 | { |
1825 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1826 | |
1827 | return dd->sw_cce_err_status_aggregate; |
1828 | } |
1829 | |
1830 | /* |
1831 | * Software counters corresponding to each of the |
1832 | * error status bits within CceErrStatus |
1833 | */ |
1834 | static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry, |
1835 | void *context, int vl, int mode, |
1836 | u64 data) |
1837 | { |
1838 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1839 | |
1840 | return dd->cce_err_status_cnt[40]; |
1841 | } |
1842 | |
1843 | static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry, |
1844 | void *context, int vl, int mode, |
1845 | u64 data) |
1846 | { |
1847 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1848 | |
1849 | return dd->cce_err_status_cnt[39]; |
1850 | } |
1851 | |
1852 | static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry, |
1853 | void *context, int vl, int mode, |
1854 | u64 data) |
1855 | { |
1856 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1857 | |
1858 | return dd->cce_err_status_cnt[38]; |
1859 | } |
1860 | |
1861 | static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry, |
1862 | void *context, int vl, int mode, |
1863 | u64 data) |
1864 | { |
1865 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1866 | |
1867 | return dd->cce_err_status_cnt[37]; |
1868 | } |
1869 | |
1870 | static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry, |
1871 | void *context, int vl, int mode, |
1872 | u64 data) |
1873 | { |
1874 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1875 | |
1876 | return dd->cce_err_status_cnt[36]; |
1877 | } |
1878 | |
1879 | static u64 access_cce_rxdma_conv_fifo_parity_err_cnt( |
1880 | const struct cntr_entry *entry, |
1881 | void *context, int vl, int mode, u64 data) |
1882 | { |
1883 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1884 | |
1885 | return dd->cce_err_status_cnt[35]; |
1886 | } |
1887 | |
1888 | static u64 access_cce_rcpl_async_fifo_parity_err_cnt( |
1889 | const struct cntr_entry *entry, |
1890 | void *context, int vl, int mode, u64 data) |
1891 | { |
1892 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1893 | |
1894 | return dd->cce_err_status_cnt[34]; |
1895 | } |
1896 | |
1897 | static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry, |
1898 | void *context, int vl, |
1899 | int mode, u64 data) |
1900 | { |
1901 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1902 | |
1903 | return dd->cce_err_status_cnt[33]; |
1904 | } |
1905 | |
1906 | static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry, |
1907 | void *context, int vl, int mode, |
1908 | u64 data) |
1909 | { |
1910 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1911 | |
1912 | return dd->cce_err_status_cnt[32]; |
1913 | } |
1914 | |
1915 | static u64 access_la_triggered_cnt(const struct cntr_entry *entry, |
1916 | void *context, int vl, int mode, u64 data) |
1917 | { |
1918 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1919 | |
1920 | return dd->cce_err_status_cnt[31]; |
1921 | } |
1922 | |
1923 | static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry, |
1924 | void *context, int vl, int mode, |
1925 | u64 data) |
1926 | { |
1927 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1928 | |
1929 | return dd->cce_err_status_cnt[30]; |
1930 | } |
1931 | |
1932 | static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry, |
1933 | void *context, int vl, int mode, |
1934 | u64 data) |
1935 | { |
1936 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1937 | |
1938 | return dd->cce_err_status_cnt[29]; |
1939 | } |
1940 | |
1941 | static u64 access_pcic_transmit_back_parity_err_cnt( |
1942 | const struct cntr_entry *entry, |
1943 | void *context, int vl, int mode, u64 data) |
1944 | { |
1945 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1946 | |
1947 | return dd->cce_err_status_cnt[28]; |
1948 | } |
1949 | |
1950 | static u64 access_pcic_transmit_front_parity_err_cnt( |
1951 | const struct cntr_entry *entry, |
1952 | void *context, int vl, int mode, u64 data) |
1953 | { |
1954 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1955 | |
1956 | return dd->cce_err_status_cnt[27]; |
1957 | } |
1958 | |
1959 | static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry, |
1960 | void *context, int vl, int mode, |
1961 | u64 data) |
1962 | { |
1963 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1964 | |
1965 | return dd->cce_err_status_cnt[26]; |
1966 | } |
1967 | |
1968 | static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry, |
1969 | void *context, int vl, int mode, |
1970 | u64 data) |
1971 | { |
1972 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1973 | |
1974 | return dd->cce_err_status_cnt[25]; |
1975 | } |
1976 | |
1977 | static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry, |
1978 | void *context, int vl, int mode, |
1979 | u64 data) |
1980 | { |
1981 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1982 | |
1983 | return dd->cce_err_status_cnt[24]; |
1984 | } |
1985 | |
1986 | static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry, |
1987 | void *context, int vl, int mode, |
1988 | u64 data) |
1989 | { |
1990 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
1991 | |
1992 | return dd->cce_err_status_cnt[23]; |
1993 | } |
1994 | |
1995 | static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry, |
1996 | void *context, int vl, |
1997 | int mode, u64 data) |
1998 | { |
1999 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2000 | |
2001 | return dd->cce_err_status_cnt[22]; |
2002 | } |
2003 | |
2004 | static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry, |
2005 | void *context, int vl, int mode, |
2006 | u64 data) |
2007 | { |
2008 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2009 | |
2010 | return dd->cce_err_status_cnt[21]; |
2011 | } |
2012 | |
2013 | static u64 access_pcic_n_post_dat_q_parity_err_cnt( |
2014 | const struct cntr_entry *entry, |
2015 | void *context, int vl, int mode, u64 data) |
2016 | { |
2017 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2018 | |
2019 | return dd->cce_err_status_cnt[20]; |
2020 | } |
2021 | |
2022 | static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry, |
2023 | void *context, int vl, |
2024 | int mode, u64 data) |
2025 | { |
2026 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2027 | |
2028 | return dd->cce_err_status_cnt[19]; |
2029 | } |
2030 | |
2031 | static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry, |
2032 | void *context, int vl, int mode, |
2033 | u64 data) |
2034 | { |
2035 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2036 | |
2037 | return dd->cce_err_status_cnt[18]; |
2038 | } |
2039 | |
2040 | static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry, |
2041 | void *context, int vl, int mode, |
2042 | u64 data) |
2043 | { |
2044 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2045 | |
2046 | return dd->cce_err_status_cnt[17]; |
2047 | } |
2048 | |
2049 | static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry, |
2050 | void *context, int vl, int mode, |
2051 | u64 data) |
2052 | { |
2053 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2054 | |
2055 | return dd->cce_err_status_cnt[16]; |
2056 | } |
2057 | |
2058 | static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry, |
2059 | void *context, int vl, int mode, |
2060 | u64 data) |
2061 | { |
2062 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2063 | |
2064 | return dd->cce_err_status_cnt[15]; |
2065 | } |
2066 | |
2067 | static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry, |
2068 | void *context, int vl, |
2069 | int mode, u64 data) |
2070 | { |
2071 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2072 | |
2073 | return dd->cce_err_status_cnt[14]; |
2074 | } |
2075 | |
2076 | static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry, |
2077 | void *context, int vl, int mode, |
2078 | u64 data) |
2079 | { |
2080 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2081 | |
2082 | return dd->cce_err_status_cnt[13]; |
2083 | } |
2084 | |
2085 | static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt( |
2086 | const struct cntr_entry *entry, |
2087 | void *context, int vl, int mode, u64 data) |
2088 | { |
2089 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2090 | |
2091 | return dd->cce_err_status_cnt[12]; |
2092 | } |
2093 | |
2094 | static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt( |
2095 | const struct cntr_entry *entry, |
2096 | void *context, int vl, int mode, u64 data) |
2097 | { |
2098 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2099 | |
2100 | return dd->cce_err_status_cnt[11]; |
2101 | } |
2102 | |
2103 | static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt( |
2104 | const struct cntr_entry *entry, |
2105 | void *context, int vl, int mode, u64 data) |
2106 | { |
2107 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2108 | |
2109 | return dd->cce_err_status_cnt[10]; |
2110 | } |
2111 | |
2112 | static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt( |
2113 | const struct cntr_entry *entry, |
2114 | void *context, int vl, int mode, u64 data) |
2115 | { |
2116 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2117 | |
2118 | return dd->cce_err_status_cnt[9]; |
2119 | } |
2120 | |
2121 | static u64 access_cce_cli2_async_fifo_parity_err_cnt( |
2122 | const struct cntr_entry *entry, |
2123 | void *context, int vl, int mode, u64 data) |
2124 | { |
2125 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2126 | |
2127 | return dd->cce_err_status_cnt[8]; |
2128 | } |
2129 | |
2130 | static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry, |
2131 | void *context, int vl, |
2132 | int mode, u64 data) |
2133 | { |
2134 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2135 | |
2136 | return dd->cce_err_status_cnt[7]; |
2137 | } |
2138 | |
2139 | static u64 access_cce_cli0_async_fifo_parity_err_cnt( |
2140 | const struct cntr_entry *entry, |
2141 | void *context, int vl, int mode, u64 data) |
2142 | { |
2143 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2144 | |
2145 | return dd->cce_err_status_cnt[6]; |
2146 | } |
2147 | |
2148 | static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry, |
2149 | void *context, int vl, int mode, |
2150 | u64 data) |
2151 | { |
2152 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2153 | |
2154 | return dd->cce_err_status_cnt[5]; |
2155 | } |
2156 | |
2157 | static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry, |
2158 | void *context, int vl, int mode, |
2159 | u64 data) |
2160 | { |
2161 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2162 | |
2163 | return dd->cce_err_status_cnt[4]; |
2164 | } |
2165 | |
2166 | static u64 access_cce_trgt_async_fifo_parity_err_cnt( |
2167 | const struct cntr_entry *entry, |
2168 | void *context, int vl, int mode, u64 data) |
2169 | { |
2170 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2171 | |
2172 | return dd->cce_err_status_cnt[3]; |
2173 | } |
2174 | |
2175 | static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, |
2176 | void *context, int vl, |
2177 | int mode, u64 data) |
2178 | { |
2179 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2180 | |
2181 | return dd->cce_err_status_cnt[2]; |
2182 | } |
2183 | |
2184 | static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, |
2185 | void *context, int vl, |
2186 | int mode, u64 data) |
2187 | { |
2188 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2189 | |
2190 | return dd->cce_err_status_cnt[1]; |
2191 | } |
2192 | |
2193 | static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry, |
2194 | void *context, int vl, int mode, |
2195 | u64 data) |
2196 | { |
2197 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2198 | |
2199 | return dd->cce_err_status_cnt[0]; |
2200 | } |
2201 | |
2202 | /* |
2203 | * Software counters corresponding to each of the |
2204 | * error status bits within RcvErrStatus |
2205 | */ |
2206 | static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry, |
2207 | void *context, int vl, int mode, |
2208 | u64 data) |
2209 | { |
2210 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2211 | |
2212 | return dd->rcv_err_status_cnt[63]; |
2213 | } |
2214 | |
2215 | static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, |
2216 | void *context, int vl, |
2217 | int mode, u64 data) |
2218 | { |
2219 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2220 | |
2221 | return dd->rcv_err_status_cnt[62]; |
2222 | } |
2223 | |
2224 | static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, |
2225 | void *context, int vl, int mode, |
2226 | u64 data) |
2227 | { |
2228 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2229 | |
2230 | return dd->rcv_err_status_cnt[61]; |
2231 | } |
2232 | |
2233 | static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry, |
2234 | void *context, int vl, int mode, |
2235 | u64 data) |
2236 | { |
2237 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2238 | |
2239 | return dd->rcv_err_status_cnt[60]; |
2240 | } |
2241 | |
2242 | static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry, |
2243 | void *context, int vl, |
2244 | int mode, u64 data) |
2245 | { |
2246 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2247 | |
2248 | return dd->rcv_err_status_cnt[59]; |
2249 | } |
2250 | |
2251 | static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry, |
2252 | void *context, int vl, |
2253 | int mode, u64 data) |
2254 | { |
2255 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2256 | |
2257 | return dd->rcv_err_status_cnt[58]; |
2258 | } |
2259 | |
2260 | static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry, |
2261 | void *context, int vl, int mode, |
2262 | u64 data) |
2263 | { |
2264 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2265 | |
2266 | return dd->rcv_err_status_cnt[57]; |
2267 | } |
2268 | |
2269 | static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry, |
2270 | void *context, int vl, int mode, |
2271 | u64 data) |
2272 | { |
2273 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2274 | |
2275 | return dd->rcv_err_status_cnt[56]; |
2276 | } |
2277 | |
2278 | static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry, |
2279 | void *context, int vl, int mode, |
2280 | u64 data) |
2281 | { |
2282 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2283 | |
2284 | return dd->rcv_err_status_cnt[55]; |
2285 | } |
2286 | |
2287 | static u64 access_rx_dma_data_fifo_rd_cor_err_cnt( |
2288 | const struct cntr_entry *entry, |
2289 | void *context, int vl, int mode, u64 data) |
2290 | { |
2291 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2292 | |
2293 | return dd->rcv_err_status_cnt[54]; |
2294 | } |
2295 | |
2296 | static u64 access_rx_dma_data_fifo_rd_unc_err_cnt( |
2297 | const struct cntr_entry *entry, |
2298 | void *context, int vl, int mode, u64 data) |
2299 | { |
2300 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2301 | |
2302 | return dd->rcv_err_status_cnt[53]; |
2303 | } |
2304 | |
2305 | static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry, |
2306 | void *context, int vl, |
2307 | int mode, u64 data) |
2308 | { |
2309 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2310 | |
2311 | return dd->rcv_err_status_cnt[52]; |
2312 | } |
2313 | |
2314 | static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry, |
2315 | void *context, int vl, |
2316 | int mode, u64 data) |
2317 | { |
2318 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2319 | |
2320 | return dd->rcv_err_status_cnt[51]; |
2321 | } |
2322 | |
2323 | static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry, |
2324 | void *context, int vl, |
2325 | int mode, u64 data) |
2326 | { |
2327 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2328 | |
2329 | return dd->rcv_err_status_cnt[50]; |
2330 | } |
2331 | |
2332 | static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry, |
2333 | void *context, int vl, |
2334 | int mode, u64 data) |
2335 | { |
2336 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2337 | |
2338 | return dd->rcv_err_status_cnt[49]; |
2339 | } |
2340 | |
2341 | static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry, |
2342 | void *context, int vl, |
2343 | int mode, u64 data) |
2344 | { |
2345 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2346 | |
2347 | return dd->rcv_err_status_cnt[48]; |
2348 | } |
2349 | |
2350 | static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry, |
2351 | void *context, int vl, |
2352 | int mode, u64 data) |
2353 | { |
2354 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2355 | |
2356 | return dd->rcv_err_status_cnt[47]; |
2357 | } |
2358 | |
2359 | static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry, |
2360 | void *context, int vl, int mode, |
2361 | u64 data) |
2362 | { |
2363 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2364 | |
2365 | return dd->rcv_err_status_cnt[46]; |
2366 | } |
2367 | |
2368 | static u64 access_rx_hq_intr_csr_parity_err_cnt( |
2369 | const struct cntr_entry *entry, |
2370 | void *context, int vl, int mode, u64 data) |
2371 | { |
2372 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2373 | |
2374 | return dd->rcv_err_status_cnt[45]; |
2375 | } |
2376 | |
2377 | static u64 access_rx_lookup_csr_parity_err_cnt( |
2378 | const struct cntr_entry *entry, |
2379 | void *context, int vl, int mode, u64 data) |
2380 | { |
2381 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2382 | |
2383 | return dd->rcv_err_status_cnt[44]; |
2384 | } |
2385 | |
2386 | static u64 access_rx_lookup_rcv_array_cor_err_cnt( |
2387 | const struct cntr_entry *entry, |
2388 | void *context, int vl, int mode, u64 data) |
2389 | { |
2390 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2391 | |
2392 | return dd->rcv_err_status_cnt[43]; |
2393 | } |
2394 | |
2395 | static u64 access_rx_lookup_rcv_array_unc_err_cnt( |
2396 | const struct cntr_entry *entry, |
2397 | void *context, int vl, int mode, u64 data) |
2398 | { |
2399 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2400 | |
2401 | return dd->rcv_err_status_cnt[42]; |
2402 | } |
2403 | |
2404 | static u64 access_rx_lookup_des_part2_parity_err_cnt( |
2405 | const struct cntr_entry *entry, |
2406 | void *context, int vl, int mode, u64 data) |
2407 | { |
2408 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2409 | |
2410 | return dd->rcv_err_status_cnt[41]; |
2411 | } |
2412 | |
2413 | static u64 access_rx_lookup_des_part1_unc_cor_err_cnt( |
2414 | const struct cntr_entry *entry, |
2415 | void *context, int vl, int mode, u64 data) |
2416 | { |
2417 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2418 | |
2419 | return dd->rcv_err_status_cnt[40]; |
2420 | } |
2421 | |
2422 | static u64 access_rx_lookup_des_part1_unc_err_cnt( |
2423 | const struct cntr_entry *entry, |
2424 | void *context, int vl, int mode, u64 data) |
2425 | { |
2426 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2427 | |
2428 | return dd->rcv_err_status_cnt[39]; |
2429 | } |
2430 | |
2431 | static u64 access_rx_rbuf_next_free_buf_cor_err_cnt( |
2432 | const struct cntr_entry *entry, |
2433 | void *context, int vl, int mode, u64 data) |
2434 | { |
2435 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2436 | |
2437 | return dd->rcv_err_status_cnt[38]; |
2438 | } |
2439 | |
2440 | static u64 access_rx_rbuf_next_free_buf_unc_err_cnt( |
2441 | const struct cntr_entry *entry, |
2442 | void *context, int vl, int mode, u64 data) |
2443 | { |
2444 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2445 | |
2446 | return dd->rcv_err_status_cnt[37]; |
2447 | } |
2448 | |
2449 | static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt( |
2450 | const struct cntr_entry *entry, |
2451 | void *context, int vl, int mode, u64 data) |
2452 | { |
2453 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2454 | |
2455 | return dd->rcv_err_status_cnt[36]; |
2456 | } |
2457 | |
2458 | static u64 access_rx_rbuf_fl_initdone_parity_err_cnt( |
2459 | const struct cntr_entry *entry, |
2460 | void *context, int vl, int mode, u64 data) |
2461 | { |
2462 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2463 | |
2464 | return dd->rcv_err_status_cnt[35]; |
2465 | } |
2466 | |
2467 | static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt( |
2468 | const struct cntr_entry *entry, |
2469 | void *context, int vl, int mode, u64 data) |
2470 | { |
2471 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2472 | |
2473 | return dd->rcv_err_status_cnt[34]; |
2474 | } |
2475 | |
2476 | static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt( |
2477 | const struct cntr_entry *entry, |
2478 | void *context, int vl, int mode, u64 data) |
2479 | { |
2480 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2481 | |
2482 | return dd->rcv_err_status_cnt[33]; |
2483 | } |
2484 | |
2485 | static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry, |
2486 | void *context, int vl, int mode, |
2487 | u64 data) |
2488 | { |
2489 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2490 | |
2491 | return dd->rcv_err_status_cnt[32]; |
2492 | } |
2493 | |
2494 | static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry, |
2495 | void *context, int vl, int mode, |
2496 | u64 data) |
2497 | { |
2498 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2499 | |
2500 | return dd->rcv_err_status_cnt[31]; |
2501 | } |
2502 | |
2503 | static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry, |
2504 | void *context, int vl, int mode, |
2505 | u64 data) |
2506 | { |
2507 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2508 | |
2509 | return dd->rcv_err_status_cnt[30]; |
2510 | } |
2511 | |
2512 | static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry, |
2513 | void *context, int vl, int mode, |
2514 | u64 data) |
2515 | { |
2516 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2517 | |
2518 | return dd->rcv_err_status_cnt[29]; |
2519 | } |
2520 | |
2521 | static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry, |
2522 | void *context, int vl, |
2523 | int mode, u64 data) |
2524 | { |
2525 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2526 | |
2527 | return dd->rcv_err_status_cnt[28]; |
2528 | } |
2529 | |
2530 | static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt( |
2531 | const struct cntr_entry *entry, |
2532 | void *context, int vl, int mode, u64 data) |
2533 | { |
2534 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2535 | |
2536 | return dd->rcv_err_status_cnt[27]; |
2537 | } |
2538 | |
2539 | static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt( |
2540 | const struct cntr_entry *entry, |
2541 | void *context, int vl, int mode, u64 data) |
2542 | { |
2543 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2544 | |
2545 | return dd->rcv_err_status_cnt[26]; |
2546 | } |
2547 | |
2548 | static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt( |
2549 | const struct cntr_entry *entry, |
2550 | void *context, int vl, int mode, u64 data) |
2551 | { |
2552 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2553 | |
2554 | return dd->rcv_err_status_cnt[25]; |
2555 | } |
2556 | |
2557 | static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt( |
2558 | const struct cntr_entry *entry, |
2559 | void *context, int vl, int mode, u64 data) |
2560 | { |
2561 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2562 | |
2563 | return dd->rcv_err_status_cnt[24]; |
2564 | } |
2565 | |
2566 | static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt( |
2567 | const struct cntr_entry *entry, |
2568 | void *context, int vl, int mode, u64 data) |
2569 | { |
2570 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2571 | |
2572 | return dd->rcv_err_status_cnt[23]; |
2573 | } |
2574 | |
2575 | static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt( |
2576 | const struct cntr_entry *entry, |
2577 | void *context, int vl, int mode, u64 data) |
2578 | { |
2579 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2580 | |
2581 | return dd->rcv_err_status_cnt[22]; |
2582 | } |
2583 | |
2584 | static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt( |
2585 | const struct cntr_entry *entry, |
2586 | void *context, int vl, int mode, u64 data) |
2587 | { |
2588 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2589 | |
2590 | return dd->rcv_err_status_cnt[21]; |
2591 | } |
2592 | |
2593 | static u64 access_rx_rbuf_block_list_read_cor_err_cnt( |
2594 | const struct cntr_entry *entry, |
2595 | void *context, int vl, int mode, u64 data) |
2596 | { |
2597 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2598 | |
2599 | return dd->rcv_err_status_cnt[20]; |
2600 | } |
2601 | |
2602 | static u64 access_rx_rbuf_block_list_read_unc_err_cnt( |
2603 | const struct cntr_entry *entry, |
2604 | void *context, int vl, int mode, u64 data) |
2605 | { |
2606 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2607 | |
2608 | return dd->rcv_err_status_cnt[19]; |
2609 | } |
2610 | |
2611 | static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry, |
2612 | void *context, int vl, |
2613 | int mode, u64 data) |
2614 | { |
2615 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2616 | |
2617 | return dd->rcv_err_status_cnt[18]; |
2618 | } |
2619 | |
2620 | static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry, |
2621 | void *context, int vl, |
2622 | int mode, u64 data) |
2623 | { |
2624 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2625 | |
2626 | return dd->rcv_err_status_cnt[17]; |
2627 | } |
2628 | |
2629 | static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt( |
2630 | const struct cntr_entry *entry, |
2631 | void *context, int vl, int mode, u64 data) |
2632 | { |
2633 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2634 | |
2635 | return dd->rcv_err_status_cnt[16]; |
2636 | } |
2637 | |
2638 | static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt( |
2639 | const struct cntr_entry *entry, |
2640 | void *context, int vl, int mode, u64 data) |
2641 | { |
2642 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2643 | |
2644 | return dd->rcv_err_status_cnt[15]; |
2645 | } |
2646 | |
2647 | static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry, |
2648 | void *context, int vl, |
2649 | int mode, u64 data) |
2650 | { |
2651 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2652 | |
2653 | return dd->rcv_err_status_cnt[14]; |
2654 | } |
2655 | |
2656 | static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry, |
2657 | void *context, int vl, |
2658 | int mode, u64 data) |
2659 | { |
2660 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2661 | |
2662 | return dd->rcv_err_status_cnt[13]; |
2663 | } |
2664 | |
2665 | static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry, |
2666 | void *context, int vl, int mode, |
2667 | u64 data) |
2668 | { |
2669 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2670 | |
2671 | return dd->rcv_err_status_cnt[12]; |
2672 | } |
2673 | |
2674 | static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry, |
2675 | void *context, int vl, int mode, |
2676 | u64 data) |
2677 | { |
2678 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2679 | |
2680 | return dd->rcv_err_status_cnt[11]; |
2681 | } |
2682 | |
2683 | static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry, |
2684 | void *context, int vl, int mode, |
2685 | u64 data) |
2686 | { |
2687 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2688 | |
2689 | return dd->rcv_err_status_cnt[10]; |
2690 | } |
2691 | |
2692 | static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry, |
2693 | void *context, int vl, int mode, |
2694 | u64 data) |
2695 | { |
2696 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2697 | |
2698 | return dd->rcv_err_status_cnt[9]; |
2699 | } |
2700 | |
2701 | static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry, |
2702 | void *context, int vl, int mode, |
2703 | u64 data) |
2704 | { |
2705 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2706 | |
2707 | return dd->rcv_err_status_cnt[8]; |
2708 | } |
2709 | |
2710 | static u64 access_rx_rcv_qp_map_table_cor_err_cnt( |
2711 | const struct cntr_entry *entry, |
2712 | void *context, int vl, int mode, u64 data) |
2713 | { |
2714 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2715 | |
2716 | return dd->rcv_err_status_cnt[7]; |
2717 | } |
2718 | |
2719 | static u64 access_rx_rcv_qp_map_table_unc_err_cnt( |
2720 | const struct cntr_entry *entry, |
2721 | void *context, int vl, int mode, u64 data) |
2722 | { |
2723 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2724 | |
2725 | return dd->rcv_err_status_cnt[6]; |
2726 | } |
2727 | |
2728 | static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry, |
2729 | void *context, int vl, int mode, |
2730 | u64 data) |
2731 | { |
2732 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2733 | |
2734 | return dd->rcv_err_status_cnt[5]; |
2735 | } |
2736 | |
2737 | static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry, |
2738 | void *context, int vl, int mode, |
2739 | u64 data) |
2740 | { |
2741 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2742 | |
2743 | return dd->rcv_err_status_cnt[4]; |
2744 | } |
2745 | |
2746 | static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry, |
2747 | void *context, int vl, int mode, |
2748 | u64 data) |
2749 | { |
2750 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2751 | |
2752 | return dd->rcv_err_status_cnt[3]; |
2753 | } |
2754 | |
2755 | static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry, |
2756 | void *context, int vl, int mode, |
2757 | u64 data) |
2758 | { |
2759 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2760 | |
2761 | return dd->rcv_err_status_cnt[2]; |
2762 | } |
2763 | |
2764 | static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry, |
2765 | void *context, int vl, int mode, |
2766 | u64 data) |
2767 | { |
2768 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2769 | |
2770 | return dd->rcv_err_status_cnt[1]; |
2771 | } |
2772 | |
2773 | static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry, |
2774 | void *context, int vl, int mode, |
2775 | u64 data) |
2776 | { |
2777 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2778 | |
2779 | return dd->rcv_err_status_cnt[0]; |
2780 | } |
2781 | |
2782 | /* |
2783 | * Software counters corresponding to each of the |
2784 | * error status bits within SendPioErrStatus |
2785 | */ |
2786 | static u64 access_pio_pec_sop_head_parity_err_cnt( |
2787 | const struct cntr_entry *entry, |
2788 | void *context, int vl, int mode, u64 data) |
2789 | { |
2790 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2791 | |
2792 | return dd->send_pio_err_status_cnt[35]; |
2793 | } |
2794 | |
2795 | static u64 access_pio_pcc_sop_head_parity_err_cnt( |
2796 | const struct cntr_entry *entry, |
2797 | void *context, int vl, int mode, u64 data) |
2798 | { |
2799 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2800 | |
2801 | return dd->send_pio_err_status_cnt[34]; |
2802 | } |
2803 | |
2804 | static u64 access_pio_last_returned_cnt_parity_err_cnt( |
2805 | const struct cntr_entry *entry, |
2806 | void *context, int vl, int mode, u64 data) |
2807 | { |
2808 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2809 | |
2810 | return dd->send_pio_err_status_cnt[33]; |
2811 | } |
2812 | |
2813 | static u64 access_pio_current_free_cnt_parity_err_cnt( |
2814 | const struct cntr_entry *entry, |
2815 | void *context, int vl, int mode, u64 data) |
2816 | { |
2817 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2818 | |
2819 | return dd->send_pio_err_status_cnt[32]; |
2820 | } |
2821 | |
2822 | static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry, |
2823 | void *context, int vl, int mode, |
2824 | u64 data) |
2825 | { |
2826 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2827 | |
2828 | return dd->send_pio_err_status_cnt[31]; |
2829 | } |
2830 | |
2831 | static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry, |
2832 | void *context, int vl, int mode, |
2833 | u64 data) |
2834 | { |
2835 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2836 | |
2837 | return dd->send_pio_err_status_cnt[30]; |
2838 | } |
2839 | |
2840 | static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry, |
2841 | void *context, int vl, int mode, |
2842 | u64 data) |
2843 | { |
2844 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2845 | |
2846 | return dd->send_pio_err_status_cnt[29]; |
2847 | } |
2848 | |
2849 | static u64 access_pio_ppmc_bqc_mem_parity_err_cnt( |
2850 | const struct cntr_entry *entry, |
2851 | void *context, int vl, int mode, u64 data) |
2852 | { |
2853 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2854 | |
2855 | return dd->send_pio_err_status_cnt[28]; |
2856 | } |
2857 | |
2858 | static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry, |
2859 | void *context, int vl, int mode, |
2860 | u64 data) |
2861 | { |
2862 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2863 | |
2864 | return dd->send_pio_err_status_cnt[27]; |
2865 | } |
2866 | |
2867 | static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry, |
2868 | void *context, int vl, int mode, |
2869 | u64 data) |
2870 | { |
2871 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2872 | |
2873 | return dd->send_pio_err_status_cnt[26]; |
2874 | } |
2875 | |
2876 | static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry, |
2877 | void *context, int vl, |
2878 | int mode, u64 data) |
2879 | { |
2880 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2881 | |
2882 | return dd->send_pio_err_status_cnt[25]; |
2883 | } |
2884 | |
2885 | static u64 access_pio_block_qw_count_parity_err_cnt( |
2886 | const struct cntr_entry *entry, |
2887 | void *context, int vl, int mode, u64 data) |
2888 | { |
2889 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2890 | |
2891 | return dd->send_pio_err_status_cnt[24]; |
2892 | } |
2893 | |
2894 | static u64 access_pio_write_qw_valid_parity_err_cnt( |
2895 | const struct cntr_entry *entry, |
2896 | void *context, int vl, int mode, u64 data) |
2897 | { |
2898 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2899 | |
2900 | return dd->send_pio_err_status_cnt[23]; |
2901 | } |
2902 | |
2903 | static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry, |
2904 | void *context, int vl, int mode, |
2905 | u64 data) |
2906 | { |
2907 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2908 | |
2909 | return dd->send_pio_err_status_cnt[22]; |
2910 | } |
2911 | |
2912 | static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry, |
2913 | void *context, int vl, |
2914 | int mode, u64 data) |
2915 | { |
2916 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2917 | |
2918 | return dd->send_pio_err_status_cnt[21]; |
2919 | } |
2920 | |
2921 | static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry, |
2922 | void *context, int vl, |
2923 | int mode, u64 data) |
2924 | { |
2925 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2926 | |
2927 | return dd->send_pio_err_status_cnt[20]; |
2928 | } |
2929 | |
2930 | static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry, |
2931 | void *context, int vl, |
2932 | int mode, u64 data) |
2933 | { |
2934 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2935 | |
2936 | return dd->send_pio_err_status_cnt[19]; |
2937 | } |
2938 | |
2939 | static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt( |
2940 | const struct cntr_entry *entry, |
2941 | void *context, int vl, int mode, u64 data) |
2942 | { |
2943 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2944 | |
2945 | return dd->send_pio_err_status_cnt[18]; |
2946 | } |
2947 | |
2948 | static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry, |
2949 | void *context, int vl, int mode, |
2950 | u64 data) |
2951 | { |
2952 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2953 | |
2954 | return dd->send_pio_err_status_cnt[17]; |
2955 | } |
2956 | |
2957 | static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry, |
2958 | void *context, int vl, int mode, |
2959 | u64 data) |
2960 | { |
2961 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2962 | |
2963 | return dd->send_pio_err_status_cnt[16]; |
2964 | } |
2965 | |
2966 | static u64 access_pio_credit_ret_fifo_parity_err_cnt( |
2967 | const struct cntr_entry *entry, |
2968 | void *context, int vl, int mode, u64 data) |
2969 | { |
2970 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2971 | |
2972 | return dd->send_pio_err_status_cnt[15]; |
2973 | } |
2974 | |
2975 | static u64 access_pio_v1_len_mem_bank1_cor_err_cnt( |
2976 | const struct cntr_entry *entry, |
2977 | void *context, int vl, int mode, u64 data) |
2978 | { |
2979 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2980 | |
2981 | return dd->send_pio_err_status_cnt[14]; |
2982 | } |
2983 | |
2984 | static u64 access_pio_v1_len_mem_bank0_cor_err_cnt( |
2985 | const struct cntr_entry *entry, |
2986 | void *context, int vl, int mode, u64 data) |
2987 | { |
2988 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2989 | |
2990 | return dd->send_pio_err_status_cnt[13]; |
2991 | } |
2992 | |
2993 | static u64 access_pio_v1_len_mem_bank1_unc_err_cnt( |
2994 | const struct cntr_entry *entry, |
2995 | void *context, int vl, int mode, u64 data) |
2996 | { |
2997 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
2998 | |
2999 | return dd->send_pio_err_status_cnt[12]; |
3000 | } |
3001 | |
3002 | static u64 access_pio_v1_len_mem_bank0_unc_err_cnt( |
3003 | const struct cntr_entry *entry, |
3004 | void *context, int vl, int mode, u64 data) |
3005 | { |
3006 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3007 | |
3008 | return dd->send_pio_err_status_cnt[11]; |
3009 | } |
3010 | |
3011 | static u64 access_pio_sm_pkt_reset_parity_err_cnt( |
3012 | const struct cntr_entry *entry, |
3013 | void *context, int vl, int mode, u64 data) |
3014 | { |
3015 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3016 | |
3017 | return dd->send_pio_err_status_cnt[10]; |
3018 | } |
3019 | |
3020 | static u64 access_pio_pkt_evict_fifo_parity_err_cnt( |
3021 | const struct cntr_entry *entry, |
3022 | void *context, int vl, int mode, u64 data) |
3023 | { |
3024 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3025 | |
3026 | return dd->send_pio_err_status_cnt[9]; |
3027 | } |
3028 | |
3029 | static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt( |
3030 | const struct cntr_entry *entry, |
3031 | void *context, int vl, int mode, u64 data) |
3032 | { |
3033 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3034 | |
3035 | return dd->send_pio_err_status_cnt[8]; |
3036 | } |
3037 | |
3038 | static u64 access_pio_sbrdctl_crrel_parity_err_cnt( |
3039 | const struct cntr_entry *entry, |
3040 | void *context, int vl, int mode, u64 data) |
3041 | { |
3042 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3043 | |
3044 | return dd->send_pio_err_status_cnt[7]; |
3045 | } |
3046 | |
3047 | static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry, |
3048 | void *context, int vl, int mode, |
3049 | u64 data) |
3050 | { |
3051 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3052 | |
3053 | return dd->send_pio_err_status_cnt[6]; |
3054 | } |
3055 | |
3056 | static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry, |
3057 | void *context, int vl, int mode, |
3058 | u64 data) |
3059 | { |
3060 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3061 | |
3062 | return dd->send_pio_err_status_cnt[5]; |
3063 | } |
3064 | |
3065 | static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry, |
3066 | void *context, int vl, int mode, |
3067 | u64 data) |
3068 | { |
3069 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3070 | |
3071 | return dd->send_pio_err_status_cnt[4]; |
3072 | } |
3073 | |
3074 | static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry, |
3075 | void *context, int vl, int mode, |
3076 | u64 data) |
3077 | { |
3078 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3079 | |
3080 | return dd->send_pio_err_status_cnt[3]; |
3081 | } |
3082 | |
3083 | static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry, |
3084 | void *context, int vl, int mode, |
3085 | u64 data) |
3086 | { |
3087 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3088 | |
3089 | return dd->send_pio_err_status_cnt[2]; |
3090 | } |
3091 | |
3092 | static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry, |
3093 | void *context, int vl, |
3094 | int mode, u64 data) |
3095 | { |
3096 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3097 | |
3098 | return dd->send_pio_err_status_cnt[1]; |
3099 | } |
3100 | |
3101 | static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry, |
3102 | void *context, int vl, int mode, |
3103 | u64 data) |
3104 | { |
3105 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3106 | |
3107 | return dd->send_pio_err_status_cnt[0]; |
3108 | } |
3109 | |
3110 | /* |
3111 | * Software counters corresponding to each of the |
3112 | * error status bits within SendDmaErrStatus |
3113 | */ |
3114 | static u64 access_sdma_pcie_req_tracking_cor_err_cnt( |
3115 | const struct cntr_entry *entry, |
3116 | void *context, int vl, int mode, u64 data) |
3117 | { |
3118 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3119 | |
3120 | return dd->send_dma_err_status_cnt[3]; |
3121 | } |
3122 | |
3123 | static u64 access_sdma_pcie_req_tracking_unc_err_cnt( |
3124 | const struct cntr_entry *entry, |
3125 | void *context, int vl, int mode, u64 data) |
3126 | { |
3127 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3128 | |
3129 | return dd->send_dma_err_status_cnt[2]; |
3130 | } |
3131 | |
3132 | static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry, |
3133 | void *context, int vl, int mode, |
3134 | u64 data) |
3135 | { |
3136 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3137 | |
3138 | return dd->send_dma_err_status_cnt[1]; |
3139 | } |
3140 | |
3141 | static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry, |
3142 | void *context, int vl, int mode, |
3143 | u64 data) |
3144 | { |
3145 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3146 | |
3147 | return dd->send_dma_err_status_cnt[0]; |
3148 | } |
3149 | |
3150 | /* |
3151 | * Software counters corresponding to each of the |
3152 | * error status bits within SendEgressErrStatus |
3153 | */ |
3154 | static u64 access_tx_read_pio_memory_csr_unc_err_cnt( |
3155 | const struct cntr_entry *entry, |
3156 | void *context, int vl, int mode, u64 data) |
3157 | { |
3158 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3159 | |
3160 | return dd->send_egress_err_status_cnt[63]; |
3161 | } |
3162 | |
3163 | static u64 access_tx_read_sdma_memory_csr_err_cnt( |
3164 | const struct cntr_entry *entry, |
3165 | void *context, int vl, int mode, u64 data) |
3166 | { |
3167 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3168 | |
3169 | return dd->send_egress_err_status_cnt[62]; |
3170 | } |
3171 | |
3172 | static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry, |
3173 | void *context, int vl, int mode, |
3174 | u64 data) |
3175 | { |
3176 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3177 | |
3178 | return dd->send_egress_err_status_cnt[61]; |
3179 | } |
3180 | |
3181 | static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry, |
3182 | void *context, int vl, |
3183 | int mode, u64 data) |
3184 | { |
3185 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3186 | |
3187 | return dd->send_egress_err_status_cnt[60]; |
3188 | } |
3189 | |
3190 | static u64 access_tx_read_sdma_memory_cor_err_cnt( |
3191 | const struct cntr_entry *entry, |
3192 | void *context, int vl, int mode, u64 data) |
3193 | { |
3194 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3195 | |
3196 | return dd->send_egress_err_status_cnt[59]; |
3197 | } |
3198 | |
3199 | static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry, |
3200 | void *context, int vl, int mode, |
3201 | u64 data) |
3202 | { |
3203 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3204 | |
3205 | return dd->send_egress_err_status_cnt[58]; |
3206 | } |
3207 | |
3208 | static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry, |
3209 | void *context, int vl, int mode, |
3210 | u64 data) |
3211 | { |
3212 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3213 | |
3214 | return dd->send_egress_err_status_cnt[57]; |
3215 | } |
3216 | |
3217 | static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry, |
3218 | void *context, int vl, int mode, |
3219 | u64 data) |
3220 | { |
3221 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3222 | |
3223 | return dd->send_egress_err_status_cnt[56]; |
3224 | } |
3225 | |
3226 | static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry, |
3227 | void *context, int vl, int mode, |
3228 | u64 data) |
3229 | { |
3230 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3231 | |
3232 | return dd->send_egress_err_status_cnt[55]; |
3233 | } |
3234 | |
3235 | static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry, |
3236 | void *context, int vl, int mode, |
3237 | u64 data) |
3238 | { |
3239 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3240 | |
3241 | return dd->send_egress_err_status_cnt[54]; |
3242 | } |
3243 | |
3244 | static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry, |
3245 | void *context, int vl, int mode, |
3246 | u64 data) |
3247 | { |
3248 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3249 | |
3250 | return dd->send_egress_err_status_cnt[53]; |
3251 | } |
3252 | |
3253 | static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry, |
3254 | void *context, int vl, int mode, |
3255 | u64 data) |
3256 | { |
3257 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3258 | |
3259 | return dd->send_egress_err_status_cnt[52]; |
3260 | } |
3261 | |
3262 | static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry, |
3263 | void *context, int vl, int mode, |
3264 | u64 data) |
3265 | { |
3266 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3267 | |
3268 | return dd->send_egress_err_status_cnt[51]; |
3269 | } |
3270 | |
3271 | static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry, |
3272 | void *context, int vl, int mode, |
3273 | u64 data) |
3274 | { |
3275 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3276 | |
3277 | return dd->send_egress_err_status_cnt[50]; |
3278 | } |
3279 | |
3280 | static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry, |
3281 | void *context, int vl, int mode, |
3282 | u64 data) |
3283 | { |
3284 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3285 | |
3286 | return dd->send_egress_err_status_cnt[49]; |
3287 | } |
3288 | |
3289 | static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry, |
3290 | void *context, int vl, int mode, |
3291 | u64 data) |
3292 | { |
3293 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3294 | |
3295 | return dd->send_egress_err_status_cnt[48]; |
3296 | } |
3297 | |
3298 | static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry, |
3299 | void *context, int vl, int mode, |
3300 | u64 data) |
3301 | { |
3302 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3303 | |
3304 | return dd->send_egress_err_status_cnt[47]; |
3305 | } |
3306 | |
3307 | static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry, |
3308 | void *context, int vl, int mode, |
3309 | u64 data) |
3310 | { |
3311 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3312 | |
3313 | return dd->send_egress_err_status_cnt[46]; |
3314 | } |
3315 | |
3316 | static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry, |
3317 | void *context, int vl, int mode, |
3318 | u64 data) |
3319 | { |
3320 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3321 | |
3322 | return dd->send_egress_err_status_cnt[45]; |
3323 | } |
3324 | |
3325 | static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry, |
3326 | void *context, int vl, |
3327 | int mode, u64 data) |
3328 | { |
3329 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3330 | |
3331 | return dd->send_egress_err_status_cnt[44]; |
3332 | } |
3333 | |
3334 | static u64 access_tx_read_sdma_memory_unc_err_cnt( |
3335 | const struct cntr_entry *entry, |
3336 | void *context, int vl, int mode, u64 data) |
3337 | { |
3338 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3339 | |
3340 | return dd->send_egress_err_status_cnt[43]; |
3341 | } |
3342 | |
3343 | static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry, |
3344 | void *context, int vl, int mode, |
3345 | u64 data) |
3346 | { |
3347 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3348 | |
3349 | return dd->send_egress_err_status_cnt[42]; |
3350 | } |
3351 | |
3352 | static u64 access_tx_credit_return_partiy_err_cnt( |
3353 | const struct cntr_entry *entry, |
3354 | void *context, int vl, int mode, u64 data) |
3355 | { |
3356 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3357 | |
3358 | return dd->send_egress_err_status_cnt[41]; |
3359 | } |
3360 | |
3361 | static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt( |
3362 | const struct cntr_entry *entry, |
3363 | void *context, int vl, int mode, u64 data) |
3364 | { |
3365 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3366 | |
3367 | return dd->send_egress_err_status_cnt[40]; |
3368 | } |
3369 | |
3370 | static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt( |
3371 | const struct cntr_entry *entry, |
3372 | void *context, int vl, int mode, u64 data) |
3373 | { |
3374 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3375 | |
3376 | return dd->send_egress_err_status_cnt[39]; |
3377 | } |
3378 | |
3379 | static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt( |
3380 | const struct cntr_entry *entry, |
3381 | void *context, int vl, int mode, u64 data) |
3382 | { |
3383 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3384 | |
3385 | return dd->send_egress_err_status_cnt[38]; |
3386 | } |
3387 | |
3388 | static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt( |
3389 | const struct cntr_entry *entry, |
3390 | void *context, int vl, int mode, u64 data) |
3391 | { |
3392 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3393 | |
3394 | return dd->send_egress_err_status_cnt[37]; |
3395 | } |
3396 | |
3397 | static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt( |
3398 | const struct cntr_entry *entry, |
3399 | void *context, int vl, int mode, u64 data) |
3400 | { |
3401 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3402 | |
3403 | return dd->send_egress_err_status_cnt[36]; |
3404 | } |
3405 | |
3406 | static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt( |
3407 | const struct cntr_entry *entry, |
3408 | void *context, int vl, int mode, u64 data) |
3409 | { |
3410 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3411 | |
3412 | return dd->send_egress_err_status_cnt[35]; |
3413 | } |
3414 | |
3415 | static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt( |
3416 | const struct cntr_entry *entry, |
3417 | void *context, int vl, int mode, u64 data) |
3418 | { |
3419 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3420 | |
3421 | return dd->send_egress_err_status_cnt[34]; |
3422 | } |
3423 | |
3424 | static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt( |
3425 | const struct cntr_entry *entry, |
3426 | void *context, int vl, int mode, u64 data) |
3427 | { |
3428 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3429 | |
3430 | return dd->send_egress_err_status_cnt[33]; |
3431 | } |
3432 | |
3433 | static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt( |
3434 | const struct cntr_entry *entry, |
3435 | void *context, int vl, int mode, u64 data) |
3436 | { |
3437 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3438 | |
3439 | return dd->send_egress_err_status_cnt[32]; |
3440 | } |
3441 | |
3442 | static u64 access_tx_sdma15_disallowed_packet_err_cnt( |
3443 | const struct cntr_entry *entry, |
3444 | void *context, int vl, int mode, u64 data) |
3445 | { |
3446 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3447 | |
3448 | return dd->send_egress_err_status_cnt[31]; |
3449 | } |
3450 | |
3451 | static u64 access_tx_sdma14_disallowed_packet_err_cnt( |
3452 | const struct cntr_entry *entry, |
3453 | void *context, int vl, int mode, u64 data) |
3454 | { |
3455 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3456 | |
3457 | return dd->send_egress_err_status_cnt[30]; |
3458 | } |
3459 | |
3460 | static u64 access_tx_sdma13_disallowed_packet_err_cnt( |
3461 | const struct cntr_entry *entry, |
3462 | void *context, int vl, int mode, u64 data) |
3463 | { |
3464 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3465 | |
3466 | return dd->send_egress_err_status_cnt[29]; |
3467 | } |
3468 | |
3469 | static u64 access_tx_sdma12_disallowed_packet_err_cnt( |
3470 | const struct cntr_entry *entry, |
3471 | void *context, int vl, int mode, u64 data) |
3472 | { |
3473 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3474 | |
3475 | return dd->send_egress_err_status_cnt[28]; |
3476 | } |
3477 | |
3478 | static u64 access_tx_sdma11_disallowed_packet_err_cnt( |
3479 | const struct cntr_entry *entry, |
3480 | void *context, int vl, int mode, u64 data) |
3481 | { |
3482 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3483 | |
3484 | return dd->send_egress_err_status_cnt[27]; |
3485 | } |
3486 | |
3487 | static u64 access_tx_sdma10_disallowed_packet_err_cnt( |
3488 | const struct cntr_entry *entry, |
3489 | void *context, int vl, int mode, u64 data) |
3490 | { |
3491 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3492 | |
3493 | return dd->send_egress_err_status_cnt[26]; |
3494 | } |
3495 | |
3496 | static u64 access_tx_sdma9_disallowed_packet_err_cnt( |
3497 | const struct cntr_entry *entry, |
3498 | void *context, int vl, int mode, u64 data) |
3499 | { |
3500 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3501 | |
3502 | return dd->send_egress_err_status_cnt[25]; |
3503 | } |
3504 | |
3505 | static u64 access_tx_sdma8_disallowed_packet_err_cnt( |
3506 | const struct cntr_entry *entry, |
3507 | void *context, int vl, int mode, u64 data) |
3508 | { |
3509 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3510 | |
3511 | return dd->send_egress_err_status_cnt[24]; |
3512 | } |
3513 | |
3514 | static u64 access_tx_sdma7_disallowed_packet_err_cnt( |
3515 | const struct cntr_entry *entry, |
3516 | void *context, int vl, int mode, u64 data) |
3517 | { |
3518 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3519 | |
3520 | return dd->send_egress_err_status_cnt[23]; |
3521 | } |
3522 | |
3523 | static u64 access_tx_sdma6_disallowed_packet_err_cnt( |
3524 | const struct cntr_entry *entry, |
3525 | void *context, int vl, int mode, u64 data) |
3526 | { |
3527 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3528 | |
3529 | return dd->send_egress_err_status_cnt[22]; |
3530 | } |
3531 | |
3532 | static u64 access_tx_sdma5_disallowed_packet_err_cnt( |
3533 | const struct cntr_entry *entry, |
3534 | void *context, int vl, int mode, u64 data) |
3535 | { |
3536 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3537 | |
3538 | return dd->send_egress_err_status_cnt[21]; |
3539 | } |
3540 | |
3541 | static u64 access_tx_sdma4_disallowed_packet_err_cnt( |
3542 | const struct cntr_entry *entry, |
3543 | void *context, int vl, int mode, u64 data) |
3544 | { |
3545 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3546 | |
3547 | return dd->send_egress_err_status_cnt[20]; |
3548 | } |
3549 | |
3550 | static u64 access_tx_sdma3_disallowed_packet_err_cnt( |
3551 | const struct cntr_entry *entry, |
3552 | void *context, int vl, int mode, u64 data) |
3553 | { |
3554 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3555 | |
3556 | return dd->send_egress_err_status_cnt[19]; |
3557 | } |
3558 | |
3559 | static u64 access_tx_sdma2_disallowed_packet_err_cnt( |
3560 | const struct cntr_entry *entry, |
3561 | void *context, int vl, int mode, u64 data) |
3562 | { |
3563 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3564 | |
3565 | return dd->send_egress_err_status_cnt[18]; |
3566 | } |
3567 | |
3568 | static u64 access_tx_sdma1_disallowed_packet_err_cnt( |
3569 | const struct cntr_entry *entry, |
3570 | void *context, int vl, int mode, u64 data) |
3571 | { |
3572 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3573 | |
3574 | return dd->send_egress_err_status_cnt[17]; |
3575 | } |
3576 | |
3577 | static u64 access_tx_sdma0_disallowed_packet_err_cnt( |
3578 | const struct cntr_entry *entry, |
3579 | void *context, int vl, int mode, u64 data) |
3580 | { |
3581 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3582 | |
3583 | return dd->send_egress_err_status_cnt[16]; |
3584 | } |
3585 | |
3586 | static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry, |
3587 | void *context, int vl, int mode, |
3588 | u64 data) |
3589 | { |
3590 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3591 | |
3592 | return dd->send_egress_err_status_cnt[15]; |
3593 | } |
3594 | |
3595 | static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry, |
3596 | void *context, int vl, |
3597 | int mode, u64 data) |
3598 | { |
3599 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3600 | |
3601 | return dd->send_egress_err_status_cnt[14]; |
3602 | } |
3603 | |
3604 | static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry, |
3605 | void *context, int vl, int mode, |
3606 | u64 data) |
3607 | { |
3608 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3609 | |
3610 | return dd->send_egress_err_status_cnt[13]; |
3611 | } |
3612 | |
3613 | static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry, |
3614 | void *context, int vl, int mode, |
3615 | u64 data) |
3616 | { |
3617 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3618 | |
3619 | return dd->send_egress_err_status_cnt[12]; |
3620 | } |
3621 | |
3622 | static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt( |
3623 | const struct cntr_entry *entry, |
3624 | void *context, int vl, int mode, u64 data) |
3625 | { |
3626 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3627 | |
3628 | return dd->send_egress_err_status_cnt[11]; |
3629 | } |
3630 | |
3631 | static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry, |
3632 | void *context, int vl, int mode, |
3633 | u64 data) |
3634 | { |
3635 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3636 | |
3637 | return dd->send_egress_err_status_cnt[10]; |
3638 | } |
3639 | |
3640 | static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry, |
3641 | void *context, int vl, int mode, |
3642 | u64 data) |
3643 | { |
3644 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3645 | |
3646 | return dd->send_egress_err_status_cnt[9]; |
3647 | } |
3648 | |
3649 | static u64 access_tx_sdma_launch_intf_parity_err_cnt( |
3650 | const struct cntr_entry *entry, |
3651 | void *context, int vl, int mode, u64 data) |
3652 | { |
3653 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3654 | |
3655 | return dd->send_egress_err_status_cnt[8]; |
3656 | } |
3657 | |
3658 | static u64 access_tx_pio_launch_intf_parity_err_cnt( |
3659 | const struct cntr_entry *entry, |
3660 | void *context, int vl, int mode, u64 data) |
3661 | { |
3662 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3663 | |
3664 | return dd->send_egress_err_status_cnt[7]; |
3665 | } |
3666 | |
3667 | static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry, |
3668 | void *context, int vl, int mode, |
3669 | u64 data) |
3670 | { |
3671 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3672 | |
3673 | return dd->send_egress_err_status_cnt[6]; |
3674 | } |
3675 | |
3676 | static u64 access_tx_incorrect_link_state_err_cnt( |
3677 | const struct cntr_entry *entry, |
3678 | void *context, int vl, int mode, u64 data) |
3679 | { |
3680 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3681 | |
3682 | return dd->send_egress_err_status_cnt[5]; |
3683 | } |
3684 | |
3685 | static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry, |
3686 | void *context, int vl, int mode, |
3687 | u64 data) |
3688 | { |
3689 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3690 | |
3691 | return dd->send_egress_err_status_cnt[4]; |
3692 | } |
3693 | |
3694 | static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt( |
3695 | const struct cntr_entry *entry, |
3696 | void *context, int vl, int mode, u64 data) |
3697 | { |
3698 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3699 | |
3700 | return dd->send_egress_err_status_cnt[3]; |
3701 | } |
3702 | |
3703 | static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry, |
3704 | void *context, int vl, int mode, |
3705 | u64 data) |
3706 | { |
3707 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3708 | |
3709 | return dd->send_egress_err_status_cnt[2]; |
3710 | } |
3711 | |
3712 | static u64 access_tx_pkt_integrity_mem_unc_err_cnt( |
3713 | const struct cntr_entry *entry, |
3714 | void *context, int vl, int mode, u64 data) |
3715 | { |
3716 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3717 | |
3718 | return dd->send_egress_err_status_cnt[1]; |
3719 | } |
3720 | |
3721 | static u64 access_tx_pkt_integrity_mem_cor_err_cnt( |
3722 | const struct cntr_entry *entry, |
3723 | void *context, int vl, int mode, u64 data) |
3724 | { |
3725 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3726 | |
3727 | return dd->send_egress_err_status_cnt[0]; |
3728 | } |
3729 | |
3730 | /* |
3731 | * Software counters corresponding to each of the |
3732 | * error status bits within SendErrStatus |
3733 | */ |
3734 | static u64 access_send_csr_write_bad_addr_err_cnt( |
3735 | const struct cntr_entry *entry, |
3736 | void *context, int vl, int mode, u64 data) |
3737 | { |
3738 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3739 | |
3740 | return dd->send_err_status_cnt[2]; |
3741 | } |
3742 | |
3743 | static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, |
3744 | void *context, int vl, |
3745 | int mode, u64 data) |
3746 | { |
3747 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3748 | |
3749 | return dd->send_err_status_cnt[1]; |
3750 | } |
3751 | |
3752 | static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry, |
3753 | void *context, int vl, int mode, |
3754 | u64 data) |
3755 | { |
3756 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3757 | |
3758 | return dd->send_err_status_cnt[0]; |
3759 | } |
3760 | |
3761 | /* |
3762 | * Software counters corresponding to each of the |
3763 | * error status bits within SendCtxtErrStatus |
3764 | */ |
3765 | static u64 access_pio_write_out_of_bounds_err_cnt( |
3766 | const struct cntr_entry *entry, |
3767 | void *context, int vl, int mode, u64 data) |
3768 | { |
3769 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3770 | |
3771 | return dd->sw_ctxt_err_status_cnt[4]; |
3772 | } |
3773 | |
3774 | static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry, |
3775 | void *context, int vl, int mode, |
3776 | u64 data) |
3777 | { |
3778 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3779 | |
3780 | return dd->sw_ctxt_err_status_cnt[3]; |
3781 | } |
3782 | |
3783 | static u64 access_pio_write_crosses_boundary_err_cnt( |
3784 | const struct cntr_entry *entry, |
3785 | void *context, int vl, int mode, u64 data) |
3786 | { |
3787 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3788 | |
3789 | return dd->sw_ctxt_err_status_cnt[2]; |
3790 | } |
3791 | |
3792 | static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry, |
3793 | void *context, int vl, |
3794 | int mode, u64 data) |
3795 | { |
3796 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3797 | |
3798 | return dd->sw_ctxt_err_status_cnt[1]; |
3799 | } |
3800 | |
3801 | static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry, |
3802 | void *context, int vl, int mode, |
3803 | u64 data) |
3804 | { |
3805 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3806 | |
3807 | return dd->sw_ctxt_err_status_cnt[0]; |
3808 | } |
3809 | |
3810 | /* |
3811 | * Software counters corresponding to each of the |
3812 | * error status bits within SendDmaEngErrStatus |
3813 | */ |
static u64 access_sdma_header_request_fifo_cor_err_cnt(
3815 | const struct cntr_entry *entry, |
3816 | void *context, int vl, int mode, u64 data) |
3817 | { |
3818 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3819 | |
3820 | return dd->sw_send_dma_eng_err_status_cnt[23]; |
3821 | } |
3822 | |
static u64 access_sdma_header_storage_cor_err_cnt(
3824 | const struct cntr_entry *entry, |
3825 | void *context, int vl, int mode, u64 data) |
3826 | { |
3827 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3828 | |
3829 | return dd->sw_send_dma_eng_err_status_cnt[22]; |
3830 | } |
3831 | |
3832 | static u64 access_sdma_packet_tracking_cor_err_cnt( |
3833 | const struct cntr_entry *entry, |
3834 | void *context, int vl, int mode, u64 data) |
3835 | { |
3836 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3837 | |
3838 | return dd->sw_send_dma_eng_err_status_cnt[21]; |
3839 | } |
3840 | |
3841 | static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry, |
3842 | void *context, int vl, int mode, |
3843 | u64 data) |
3844 | { |
3845 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3846 | |
3847 | return dd->sw_send_dma_eng_err_status_cnt[20]; |
3848 | } |
3849 | |
3850 | static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry, |
3851 | void *context, int vl, int mode, |
3852 | u64 data) |
3853 | { |
3854 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3855 | |
3856 | return dd->sw_send_dma_eng_err_status_cnt[19]; |
3857 | } |
3858 | |
static u64 access_sdma_header_request_fifo_unc_err_cnt(
3860 | const struct cntr_entry *entry, |
3861 | void *context, int vl, int mode, u64 data) |
3862 | { |
3863 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3864 | |
3865 | return dd->sw_send_dma_eng_err_status_cnt[18]; |
3866 | } |
3867 | |
static u64 access_sdma_header_storage_unc_err_cnt(
3869 | const struct cntr_entry *entry, |
3870 | void *context, int vl, int mode, u64 data) |
3871 | { |
3872 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3873 | |
3874 | return dd->sw_send_dma_eng_err_status_cnt[17]; |
3875 | } |
3876 | |
3877 | static u64 access_sdma_packet_tracking_unc_err_cnt( |
3878 | const struct cntr_entry *entry, |
3879 | void *context, int vl, int mode, u64 data) |
3880 | { |
3881 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3882 | |
3883 | return dd->sw_send_dma_eng_err_status_cnt[16]; |
3884 | } |
3885 | |
3886 | static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry, |
3887 | void *context, int vl, int mode, |
3888 | u64 data) |
3889 | { |
3890 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3891 | |
3892 | return dd->sw_send_dma_eng_err_status_cnt[15]; |
3893 | } |
3894 | |
3895 | static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry, |
3896 | void *context, int vl, int mode, |
3897 | u64 data) |
3898 | { |
3899 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3900 | |
3901 | return dd->sw_send_dma_eng_err_status_cnt[14]; |
3902 | } |
3903 | |
3904 | static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry, |
3905 | void *context, int vl, int mode, |
3906 | u64 data) |
3907 | { |
3908 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3909 | |
3910 | return dd->sw_send_dma_eng_err_status_cnt[13]; |
3911 | } |
3912 | |
static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3914 | void *context, int vl, int mode, |
3915 | u64 data) |
3916 | { |
3917 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3918 | |
3919 | return dd->sw_send_dma_eng_err_status_cnt[12]; |
3920 | } |
3921 | |
static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3923 | void *context, int vl, int mode, |
3924 | u64 data) |
3925 | { |
3926 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3927 | |
3928 | return dd->sw_send_dma_eng_err_status_cnt[11]; |
3929 | } |
3930 | |
static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3932 | void *context, int vl, int mode, |
3933 | u64 data) |
3934 | { |
3935 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3936 | |
3937 | return dd->sw_send_dma_eng_err_status_cnt[10]; |
3938 | } |
3939 | |
3940 | static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry, |
3941 | void *context, int vl, int mode, |
3942 | u64 data) |
3943 | { |
3944 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3945 | |
3946 | return dd->sw_send_dma_eng_err_status_cnt[9]; |
3947 | } |
3948 | |
3949 | static u64 access_sdma_packet_desc_overflow_err_cnt( |
3950 | const struct cntr_entry *entry, |
3951 | void *context, int vl, int mode, u64 data) |
3952 | { |
3953 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3954 | |
3955 | return dd->sw_send_dma_eng_err_status_cnt[8]; |
3956 | } |
3957 | |
3958 | static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry, |
3959 | void *context, int vl, |
3960 | int mode, u64 data) |
3961 | { |
3962 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3963 | |
3964 | return dd->sw_send_dma_eng_err_status_cnt[7]; |
3965 | } |
3966 | |
3967 | static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry, |
3968 | void *context, int vl, int mode, u64 data) |
3969 | { |
3970 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3971 | |
3972 | return dd->sw_send_dma_eng_err_status_cnt[6]; |
3973 | } |
3974 | |
3975 | static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry, |
3976 | void *context, int vl, int mode, |
3977 | u64 data) |
3978 | { |
3979 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3980 | |
3981 | return dd->sw_send_dma_eng_err_status_cnt[5]; |
3982 | } |
3983 | |
3984 | static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry, |
3985 | void *context, int vl, int mode, |
3986 | u64 data) |
3987 | { |
3988 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3989 | |
3990 | return dd->sw_send_dma_eng_err_status_cnt[4]; |
3991 | } |
3992 | |
3993 | static u64 access_sdma_tail_out_of_bounds_err_cnt( |
3994 | const struct cntr_entry *entry, |
3995 | void *context, int vl, int mode, u64 data) |
3996 | { |
3997 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
3998 | |
3999 | return dd->sw_send_dma_eng_err_status_cnt[3]; |
4000 | } |
4001 | |
4002 | static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry, |
4003 | void *context, int vl, int mode, |
4004 | u64 data) |
4005 | { |
4006 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
4007 | |
4008 | return dd->sw_send_dma_eng_err_status_cnt[2]; |
4009 | } |
4010 | |
4011 | static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry, |
4012 | void *context, int vl, int mode, |
4013 | u64 data) |
4014 | { |
4015 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
4016 | |
4017 | return dd->sw_send_dma_eng_err_status_cnt[1]; |
4018 | } |
4019 | |
4020 | static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry, |
4021 | void *context, int vl, int mode, |
4022 | u64 data) |
4023 | { |
4024 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
4025 | |
4026 | return dd->sw_send_dma_eng_err_status_cnt[0]; |
4027 | } |
4028 | |
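/*
 * DcRecvErr is special-cased: the hardware CSR count is combined with
 * the software-counted bypass packet errors.  A read returns the sum,
 * saturated at CNTR_MAX; a write (counter reset) also clears the
 * software count.
 */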
4029 | static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry, |
4030 | void *context, int vl, int mode, |
4031 | u64 data) |
4032 | { |
4033 | struct hfi1_devdata *dd = (struct hfi1_devdata *)context; |
4034 | |
4035 | u64 val = 0; |
4036 | u64 csr = entry->csr; |
4037 | |
val = read_write_csr(dd, csr, mode, data);
4039 | if (mode == CNTR_MODE_R) { |
4040 | val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? |
4041 | CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; |
4042 | } else if (mode == CNTR_MODE_W) { |
4043 | dd->sw_rcv_bypass_packet_errors = 0; |
4044 | } else { |
4045 | dd_dev_err(dd, "Invalid cntr register access mode" ); |
4046 | return 0; |
4047 | } |
4048 | return val; |
4049 | } |
4050 | |
4051 | #define def_access_sw_cpu(cntr) \ |
4052 | static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \ |
4053 | void *context, int vl, int mode, u64 data) \ |
4054 | { \ |
4055 | struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \ |
4056 | return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \ |
4057 | ppd->ibport_data.rvp.cntr, vl, \ |
4058 | mode, data); \ |
4059 | } |
4060 | |
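/*
 * As an illustration, def_access_sw_cpu(rc_acks) below defines
 * access_sw_cpu_rc_acks(), which passes the per-CPU rc_acks counter
 * and its z_rc_acks zero reference to read_write_cpu().
 */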
4061 | def_access_sw_cpu(rc_acks); |
4062 | def_access_sw_cpu(rc_qacks); |
4063 | def_access_sw_cpu(rc_delayed_comp); |
4064 | |
4065 | #define def_access_ibp_counter(cntr) \ |
4066 | static u64 access_ibp_##cntr(const struct cntr_entry *entry, \ |
4067 | void *context, int vl, int mode, u64 data) \ |
4068 | { \ |
4069 | struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \ |
4070 | \ |
4071 | if (vl != CNTR_INVALID_VL) \ |
4072 | return 0; \ |
4073 | \ |
4074 | return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \ |
4075 | mode, data); \ |
4076 | } |
4077 | |
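/*
 * Similarly, def_access_ibp_counter(loop_pkts) defines
 * access_ibp_loop_pkts().  These port counters are not kept per VL,
 * so any per-VL query simply returns 0.
 */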
4078 | def_access_ibp_counter(loop_pkts); |
4079 | def_access_ibp_counter(rc_resends); |
4080 | def_access_ibp_counter(rnr_naks); |
4081 | def_access_ibp_counter(other_naks); |
4082 | def_access_ibp_counter(rc_timeouts); |
4083 | def_access_ibp_counter(pkt_drops); |
4084 | def_access_ibp_counter(dmawait); |
4085 | def_access_ibp_counter(rc_seqnak); |
4086 | def_access_ibp_counter(rc_dupreq); |
4087 | def_access_ibp_counter(rdma_seq); |
4088 | def_access_ibp_counter(unaligned); |
4089 | def_access_ibp_counter(seq_naks); |
4090 | def_access_ibp_counter(rc_crwaits); |
4091 | |
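/*
 * Device counter table, indexed by the C_* device counter enums.
 * CNTR_SYNTH entries are synthesized into full 64-bit values in
 * software (the underlying hardware counters are narrower and can
 * wrap); CNTR_VL entries are maintained per virtual lane.
 */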
4092 | static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { |
4093 | [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), |
4094 | [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH), |
4095 | [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH), |
4096 | [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH), |
4097 | [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH), |
4098 | [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, |
4099 | CNTR_NORMAL), |
4100 | [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT, |
4101 | CNTR_NORMAL), |
4102 | [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs, |
4103 | RCV_TID_FLOW_GEN_MISMATCH_CNT, |
4104 | CNTR_NORMAL), |
4105 | [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL, |
4106 | CNTR_NORMAL), |
4107 | [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs, |
4108 | RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL), |
4109 | [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt, |
4110 | CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL), |
4111 | [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT, |
4112 | CNTR_NORMAL), |
4113 | [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT, |
4114 | CNTR_NORMAL), |
4115 | [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT, |
4116 | CNTR_NORMAL), |
4117 | [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT, |
4118 | CNTR_NORMAL), |
4119 | [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT, |
4120 | CNTR_NORMAL), |
4121 | [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT, |
4122 | CNTR_NORMAL), |
4123 | [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt, |
4124 | CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL), |
4125 | [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt, |
4126 | CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL), |
4127 | [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT, |
4128 | CNTR_SYNTH), |
4129 | [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr" , DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH, |
4130 | access_dc_rcv_err_cnt), |
4131 | [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT, |
4132 | CNTR_SYNTH), |
4133 | [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT, |
4134 | CNTR_SYNTH), |
4135 | [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT, |
4136 | CNTR_SYNTH), |
4137 | [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts, |
4138 | DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH), |
4139 | [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts, |
4140 | DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, |
4141 | CNTR_SYNTH), |
4142 | [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr, |
4143 | DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH), |
4144 | [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT, |
4145 | CNTR_SYNTH), |
4146 | [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT, |
4147 | CNTR_SYNTH), |
4148 | [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT, |
4149 | CNTR_SYNTH), |
4150 | [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT, |
4151 | CNTR_SYNTH), |
4152 | [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT, |
4153 | CNTR_SYNTH), |
4154 | [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT, |
4155 | CNTR_SYNTH), |
4156 | [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT, |
4157 | CNTR_SYNTH), |
4158 | [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT, |
4159 | CNTR_SYNTH | CNTR_VL), |
4160 | [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT, |
4161 | CNTR_SYNTH | CNTR_VL), |
4162 | [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH), |
4163 | [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT, |
4164 | CNTR_SYNTH | CNTR_VL), |
4165 | [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH), |
4166 | [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT, |
4167 | CNTR_SYNTH | CNTR_VL), |
4168 | [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT, |
4169 | CNTR_SYNTH), |
4170 | [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT, |
4171 | CNTR_SYNTH | CNTR_VL), |
4172 | [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT, |
4173 | CNTR_SYNTH), |
4174 | [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT, |
4175 | CNTR_SYNTH | CNTR_VL), |
4176 | [C_DC_TOTAL_CRC] = |
4177 | DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR, |
4178 | CNTR_SYNTH), |
4179 | [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0, |
4180 | CNTR_SYNTH), |
4181 | [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1, |
4182 | CNTR_SYNTH), |
4183 | [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2, |
4184 | CNTR_SYNTH), |
4185 | [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3, |
4186 | CNTR_SYNTH), |
4187 | [C_DC_CRC_MULT_LN] = |
4188 | DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN, |
4189 | CNTR_SYNTH), |
4190 | [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT, |
4191 | CNTR_SYNTH), |
4192 | [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT, |
4193 | CNTR_SYNTH), |
4194 | [C_DC_SEQ_CRC_CNT] = |
4195 | DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT, |
4196 | CNTR_SYNTH), |
4197 | [C_DC_ESC0_ONLY_CNT] = |
4198 | DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT, |
4199 | CNTR_SYNTH), |
4200 | [C_DC_ESC0_PLUS1_CNT] = |
4201 | DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT, |
4202 | CNTR_SYNTH), |
4203 | [C_DC_ESC0_PLUS2_CNT] = |
4204 | DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT, |
4205 | CNTR_SYNTH), |
4206 | [C_DC_REINIT_FROM_PEER_CNT] = |
4207 | DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, |
4208 | CNTR_SYNTH), |
4209 | [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT, |
4210 | CNTR_SYNTH), |
4211 | [C_DC_MISC_FLG_CNT] = |
4212 | DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT, |
4213 | CNTR_SYNTH), |
4214 | [C_DC_PRF_GOOD_LTP_CNT] = |
4215 | DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH), |
4216 | [C_DC_PRF_ACCEPTED_LTP_CNT] = |
4217 | DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT, |
4218 | CNTR_SYNTH), |
4219 | [C_DC_PRF_RX_FLIT_CNT] = |
4220 | DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH), |
4221 | [C_DC_PRF_TX_FLIT_CNT] = |
4222 | DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH), |
4223 | [C_DC_PRF_CLK_CNTR] = |
4224 | DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH), |
4225 | [C_DC_PG_DBG_FLIT_CRDTS_CNT] = |
4226 | DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH), |
4227 | [C_DC_PG_STS_PAUSE_COMPLETE_CNT] = |
4228 | DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT, |
4229 | CNTR_SYNTH), |
4230 | [C_DC_PG_STS_TX_SBE_CNT] = |
4231 | DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH), |
4232 | [C_DC_PG_STS_TX_MBE_CNT] = |
4233 | DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT, |
4234 | CNTR_SYNTH), |
4235 | [C_SW_CPU_INTR] = CNTR_ELEM("Intr" , 0, 0, CNTR_NORMAL, |
4236 | access_sw_cpu_intr), |
4237 | [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit" , 0, 0, CNTR_NORMAL, |
4238 | access_sw_cpu_rcv_limit), |
4239 | [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0" , 0, 0, CNTR_NORMAL, |
4240 | access_sw_ctx0_seq_drop), |
4241 | [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait" , 0, 0, CNTR_NORMAL, |
4242 | access_sw_vtx_wait), |
4243 | [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait" , 0, 0, CNTR_NORMAL, |
4244 | access_sw_pio_wait), |
4245 | [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain" , 0, 0, CNTR_NORMAL, |
4246 | access_sw_pio_drain), |
4247 | [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait" , 0, 0, CNTR_NORMAL, |
4248 | access_sw_kmem_wait), |
4249 | [C_SW_TID_WAIT] = CNTR_ELEM("TidWait" , 0, 0, CNTR_NORMAL, |
4250 | hfi1_access_sw_tid_wait), |
4251 | [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched" , 0, 0, CNTR_NORMAL, |
4252 | access_sw_send_schedule), |
4253 | [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn" , |
4254 | SEND_DMA_DESC_FETCHED_CNT, 0, |
4255 | CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, |
4256 | dev_access_u32_csr), |
4257 | [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt" , 0, 0, |
4258 | CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, |
4259 | access_sde_int_cnt), |
4260 | [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt" , 0, 0, |
4261 | CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, |
4262 | access_sde_err_cnt), |
4263 | [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt" , 0, 0, |
4264 | CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, |
4265 | access_sde_idle_int_cnt), |
4266 | [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn" , 0, 0, |
4267 | CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, |
4268 | access_sde_progress_int_cnt), |
4269 | /* MISC_ERR_STATUS */ |
4270 | [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR" , 0, 0, |
4271 | CNTR_NORMAL, |
4272 | access_misc_pll_lock_fail_err_cnt), |
4273 | [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR" , 0, 0, |
4274 | CNTR_NORMAL, |
4275 | access_misc_mbist_fail_err_cnt), |
4276 | [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR" , 0, 0, |
4277 | CNTR_NORMAL, |
4278 | access_misc_invalid_eep_cmd_err_cnt), |
4279 | [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR" , 0, 0, |
4280 | CNTR_NORMAL, |
4281 | access_misc_efuse_done_parity_err_cnt), |
4282 | [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR" , 0, 0, |
4283 | CNTR_NORMAL, |
4284 | access_misc_efuse_write_err_cnt), |
4285 | [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR" , 0, |
4286 | 0, CNTR_NORMAL, |
4287 | access_misc_efuse_read_bad_addr_err_cnt), |
4288 | [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR" , 0, 0, |
4289 | CNTR_NORMAL, |
4290 | access_misc_efuse_csr_parity_err_cnt), |
4291 | [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR" , 0, 0, |
4292 | CNTR_NORMAL, |
4293 | access_misc_fw_auth_failed_err_cnt), |
4294 | [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR" , 0, 0, |
4295 | CNTR_NORMAL, |
4296 | access_misc_key_mismatch_err_cnt), |
4297 | [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR" , 0, 0, |
4298 | CNTR_NORMAL, |
4299 | access_misc_sbus_write_failed_err_cnt), |
4300 | [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR" , 0, 0, |
4301 | CNTR_NORMAL, |
4302 | access_misc_csr_write_bad_addr_err_cnt), |
4303 | [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR" , 0, 0, |
4304 | CNTR_NORMAL, |
4305 | access_misc_csr_read_bad_addr_err_cnt), |
4306 | [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR" , 0, 0, |
4307 | CNTR_NORMAL, |
4308 | access_misc_csr_parity_err_cnt), |
4309 | /* CceErrStatus */ |
4310 | [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt" , 0, 0, |
4311 | CNTR_NORMAL, |
4312 | access_sw_cce_err_status_aggregated_cnt), |
4313 | [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr" , 0, 0, |
4314 | CNTR_NORMAL, |
4315 | access_cce_msix_csr_parity_err_cnt), |
4316 | [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr" , 0, 0, |
4317 | CNTR_NORMAL, |
4318 | access_cce_int_map_unc_err_cnt), |
4319 | [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr" , 0, 0, |
4320 | CNTR_NORMAL, |
4321 | access_cce_int_map_cor_err_cnt), |
4322 | [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr" , 0, 0, |
4323 | CNTR_NORMAL, |
4324 | access_cce_msix_table_unc_err_cnt), |
4325 | [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr" , 0, 0, |
4326 | CNTR_NORMAL, |
4327 | access_cce_msix_table_cor_err_cnt), |
4328 | [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr" , 0, |
4329 | 0, CNTR_NORMAL, |
4330 | access_cce_rxdma_conv_fifo_parity_err_cnt), |
4331 | [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr" , 0, |
4332 | 0, CNTR_NORMAL, |
4333 | access_cce_rcpl_async_fifo_parity_err_cnt), |
4334 | [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr" , 0, 0, |
4335 | CNTR_NORMAL, |
4336 | access_cce_seg_write_bad_addr_err_cnt), |
4337 | [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr" , 0, 0, |
4338 | CNTR_NORMAL, |
4339 | access_cce_seg_read_bad_addr_err_cnt), |
4340 | [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered" , 0, 0, |
4341 | CNTR_NORMAL, |
4342 | access_la_triggered_cnt), |
4343 | [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr" , 0, 0, |
4344 | CNTR_NORMAL, |
4345 | access_cce_trgt_cpl_timeout_err_cnt), |
4346 | [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr" , 0, 0, |
4347 | CNTR_NORMAL, |
4348 | access_pcic_receive_parity_err_cnt), |
4349 | [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr" , 0, 0, |
4350 | CNTR_NORMAL, |
4351 | access_pcic_transmit_back_parity_err_cnt), |
4352 | [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr" , 0, |
4353 | 0, CNTR_NORMAL, |
4354 | access_pcic_transmit_front_parity_err_cnt), |
4355 | [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr" , 0, 0, |
4356 | CNTR_NORMAL, |
4357 | access_pcic_cpl_dat_q_unc_err_cnt), |
4358 | [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr" , 0, 0, |
4359 | CNTR_NORMAL, |
4360 | access_pcic_cpl_hd_q_unc_err_cnt), |
4361 | [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr" , 0, 0, |
4362 | CNTR_NORMAL, |
4363 | access_pcic_post_dat_q_unc_err_cnt), |
4364 | [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr" , 0, 0, |
4365 | CNTR_NORMAL, |
4366 | access_pcic_post_hd_q_unc_err_cnt), |
4367 | [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr" , 0, 0, |
4368 | CNTR_NORMAL, |
4369 | access_pcic_retry_sot_mem_unc_err_cnt), |
4370 | [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr" , 0, 0, |
4371 | CNTR_NORMAL, |
4372 | access_pcic_retry_mem_unc_err), |
4373 | [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr" , 0, 0, |
4374 | CNTR_NORMAL, |
4375 | access_pcic_n_post_dat_q_parity_err_cnt), |
4376 | [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr" , 0, 0, |
4377 | CNTR_NORMAL, |
4378 | access_pcic_n_post_h_q_parity_err_cnt), |
4379 | [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr" , 0, 0, |
4380 | CNTR_NORMAL, |
4381 | access_pcic_cpl_dat_q_cor_err_cnt), |
4382 | [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr" , 0, 0, |
4383 | CNTR_NORMAL, |
4384 | access_pcic_cpl_hd_q_cor_err_cnt), |
4385 | [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr" , 0, 0, |
4386 | CNTR_NORMAL, |
4387 | access_pcic_post_dat_q_cor_err_cnt), |
4388 | [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr" , 0, 0, |
4389 | CNTR_NORMAL, |
4390 | access_pcic_post_hd_q_cor_err_cnt), |
4391 | [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr" , 0, 0, |
4392 | CNTR_NORMAL, |
4393 | access_pcic_retry_sot_mem_cor_err_cnt), |
4394 | [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr" , 0, 0, |
4395 | CNTR_NORMAL, |
4396 | access_pcic_retry_mem_cor_err_cnt), |
4397 | [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM( |
4398 | "CceCli1AsyncFifoDbgParityError" , 0, 0, |
4399 | CNTR_NORMAL, |
4400 | access_cce_cli1_async_fifo_dbg_parity_err_cnt), |
4401 | [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM( |
4402 | "CceCli1AsyncFifoRxdmaParityError" , 0, 0, |
4403 | CNTR_NORMAL, |
4404 | access_cce_cli1_async_fifo_rxdma_parity_err_cnt |
4405 | ), |
4406 | [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM( |
4407 | "CceCli1AsyncFifoSdmaHdParityErr" , 0, 0, |
4408 | CNTR_NORMAL, |
4409 | access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt), |
4410 | [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM( |
4411 | "CceCli1AsyncFifoPioCrdtParityErr" , 0, 0, |
4412 | CNTR_NORMAL, |
4413 | access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt), |
4414 | [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr" , 0, |
4415 | 0, CNTR_NORMAL, |
4416 | access_cce_cli2_async_fifo_parity_err_cnt), |
4417 | [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr" , 0, 0, |
4418 | CNTR_NORMAL, |
4419 | access_cce_csr_cfg_bus_parity_err_cnt), |
4420 | [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr" , 0, |
4421 | 0, CNTR_NORMAL, |
4422 | access_cce_cli0_async_fifo_parity_err_cnt), |
4423 | [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr" , 0, 0, |
4424 | CNTR_NORMAL, |
4425 | access_cce_rspd_data_parity_err_cnt), |
4426 | [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr" , 0, 0, |
4427 | CNTR_NORMAL, |
4428 | access_cce_trgt_access_err_cnt), |
4429 | [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr" , 0, |
4430 | 0, CNTR_NORMAL, |
4431 | access_cce_trgt_async_fifo_parity_err_cnt), |
4432 | [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr" , 0, 0, |
4433 | CNTR_NORMAL, |
4434 | access_cce_csr_write_bad_addr_err_cnt), |
4435 | [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr" , 0, 0, |
4436 | CNTR_NORMAL, |
4437 | access_cce_csr_read_bad_addr_err_cnt), |
4438 | [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr" , 0, 0, |
4439 | CNTR_NORMAL, |
4440 | access_ccs_csr_parity_err_cnt), |
4441 | |
4442 | /* RcvErrStatus */ |
4443 | [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr" , 0, 0, |
4444 | CNTR_NORMAL, |
4445 | access_rx_csr_parity_err_cnt), |
4446 | [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr" , 0, 0, |
4447 | CNTR_NORMAL, |
4448 | access_rx_csr_write_bad_addr_err_cnt), |
4449 | [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr" , 0, 0, |
4450 | CNTR_NORMAL, |
4451 | access_rx_csr_read_bad_addr_err_cnt), |
4452 | [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr" , 0, 0, |
4453 | CNTR_NORMAL, |
4454 | access_rx_dma_csr_unc_err_cnt), |
4455 | [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr" , 0, 0, |
4456 | CNTR_NORMAL, |
4457 | access_rx_dma_dq_fsm_encoding_err_cnt), |
4458 | [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr" , 0, 0, |
4459 | CNTR_NORMAL, |
4460 | access_rx_dma_eq_fsm_encoding_err_cnt), |
4461 | [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr" , 0, 0, |
4462 | CNTR_NORMAL, |
4463 | access_rx_dma_csr_parity_err_cnt), |
4464 | [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr" , 0, 0, |
4465 | CNTR_NORMAL, |
4466 | access_rx_rbuf_data_cor_err_cnt), |
4467 | [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr" , 0, 0, |
4468 | CNTR_NORMAL, |
4469 | access_rx_rbuf_data_unc_err_cnt), |
4470 | [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr" , 0, 0, |
4471 | CNTR_NORMAL, |
4472 | access_rx_dma_data_fifo_rd_cor_err_cnt), |
4473 | [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr" , 0, 0, |
4474 | CNTR_NORMAL, |
4475 | access_rx_dma_data_fifo_rd_unc_err_cnt), |
4476 | [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr" , 0, 0, |
4477 | CNTR_NORMAL, |
4478 | access_rx_dma_hdr_fifo_rd_cor_err_cnt), |
4479 | [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr" , 0, 0, |
4480 | CNTR_NORMAL, |
4481 | access_rx_dma_hdr_fifo_rd_unc_err_cnt), |
4482 | [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr" , 0, 0, |
4483 | CNTR_NORMAL, |
4484 | access_rx_rbuf_desc_part2_cor_err_cnt), |
4485 | [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr" , 0, 0, |
4486 | CNTR_NORMAL, |
4487 | access_rx_rbuf_desc_part2_unc_err_cnt), |
4488 | [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr" , 0, 0, |
4489 | CNTR_NORMAL, |
4490 | access_rx_rbuf_desc_part1_cor_err_cnt), |
4491 | [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr" , 0, 0, |
4492 | CNTR_NORMAL, |
4493 | access_rx_rbuf_desc_part1_unc_err_cnt), |
4494 | [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr" , 0, 0, |
4495 | CNTR_NORMAL, |
4496 | access_rx_hq_intr_fsm_err_cnt), |
4497 | [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr" , 0, 0, |
4498 | CNTR_NORMAL, |
4499 | access_rx_hq_intr_csr_parity_err_cnt), |
4500 | [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr" , 0, 0, |
4501 | CNTR_NORMAL, |
4502 | access_rx_lookup_csr_parity_err_cnt), |
4503 | [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr" , 0, 0, |
4504 | CNTR_NORMAL, |
4505 | access_rx_lookup_rcv_array_cor_err_cnt), |
4506 | [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr" , 0, 0, |
4507 | CNTR_NORMAL, |
4508 | access_rx_lookup_rcv_array_unc_err_cnt), |
4509 | [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr" , 0, |
4510 | 0, CNTR_NORMAL, |
4511 | access_rx_lookup_des_part2_parity_err_cnt), |
4512 | [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr" , 0, |
4513 | 0, CNTR_NORMAL, |
4514 | access_rx_lookup_des_part1_unc_cor_err_cnt), |
4515 | [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr" , 0, 0, |
4516 | CNTR_NORMAL, |
4517 | access_rx_lookup_des_part1_unc_err_cnt), |
4518 | [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr" , 0, 0, |
4519 | CNTR_NORMAL, |
4520 | access_rx_rbuf_next_free_buf_cor_err_cnt), |
4521 | [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr" , 0, 0, |
4522 | CNTR_NORMAL, |
4523 | access_rx_rbuf_next_free_buf_unc_err_cnt), |
4524 | [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM( |
4525 | "RxRbufFlInitWrAddrParityErr" , 0, 0, |
4526 | CNTR_NORMAL, |
4527 | access_rbuf_fl_init_wr_addr_parity_err_cnt), |
4528 | [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr" , 0, |
4529 | 0, CNTR_NORMAL, |
4530 | access_rx_rbuf_fl_initdone_parity_err_cnt), |
4531 | [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr" , 0, |
4532 | 0, CNTR_NORMAL, |
4533 | access_rx_rbuf_fl_write_addr_parity_err_cnt), |
4534 | [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr" , 0, 0, |
4535 | CNTR_NORMAL, |
4536 | access_rx_rbuf_fl_rd_addr_parity_err_cnt), |
4537 | [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr" , 0, 0, |
4538 | CNTR_NORMAL, |
4539 | access_rx_rbuf_empty_err_cnt), |
4540 | [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr" , 0, 0, |
4541 | CNTR_NORMAL, |
4542 | access_rx_rbuf_full_err_cnt), |
4543 | [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr" , 0, 0, |
4544 | CNTR_NORMAL, |
4545 | access_rbuf_bad_lookup_err_cnt), |
4546 | [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr" , 0, 0, |
4547 | CNTR_NORMAL, |
4548 | access_rbuf_ctx_id_parity_err_cnt), |
4549 | [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr" , 0, 0, |
4550 | CNTR_NORMAL, |
4551 | access_rbuf_csr_qeopdw_parity_err_cnt), |
4552 | [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM( |
4553 | "RxRbufCsrQNumOfPktParityErr" , 0, 0, |
4554 | CNTR_NORMAL, |
4555 | access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt), |
4556 | [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM( |
4557 | "RxRbufCsrQTlPtrParityErr" , 0, 0, |
4558 | CNTR_NORMAL, |
4559 | access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt), |
4560 | [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr" , 0, |
4561 | 0, CNTR_NORMAL, |
4562 | access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt), |
4563 | [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr" , 0, |
4564 | 0, CNTR_NORMAL, |
4565 | access_rx_rbuf_csr_q_vld_bit_parity_err_cnt), |
4566 | [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr" , |
4567 | 0, 0, CNTR_NORMAL, |
4568 | access_rx_rbuf_csr_q_next_buf_parity_err_cnt), |
4569 | [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr" , 0, |
4570 | 0, CNTR_NORMAL, |
4571 | access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt), |
4572 | [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM( |
4573 | "RxRbufCsrQHeadBufNumParityErr" , 0, 0, |
4574 | CNTR_NORMAL, |
4575 | access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt), |
4576 | [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr" , 0, |
4577 | 0, CNTR_NORMAL, |
4578 | access_rx_rbuf_block_list_read_cor_err_cnt), |
4579 | [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr" , 0, |
4580 | 0, CNTR_NORMAL, |
4581 | access_rx_rbuf_block_list_read_unc_err_cnt), |
4582 | [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr" , 0, 0, |
4583 | CNTR_NORMAL, |
4584 | access_rx_rbuf_lookup_des_cor_err_cnt), |
4585 | [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr" , 0, 0, |
4586 | CNTR_NORMAL, |
4587 | access_rx_rbuf_lookup_des_unc_err_cnt), |
4588 | [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM( |
4589 | "RxRbufLookupDesRegUncCorErr" , 0, 0, |
4590 | CNTR_NORMAL, |
4591 | access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt), |
4592 | [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr" , 0, 0, |
4593 | CNTR_NORMAL, |
4594 | access_rx_rbuf_lookup_des_reg_unc_err_cnt), |
4595 | [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr" , 0, 0, |
4596 | CNTR_NORMAL, |
4597 | access_rx_rbuf_free_list_cor_err_cnt), |
4598 | [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr" , 0, 0, |
4599 | CNTR_NORMAL, |
4600 | access_rx_rbuf_free_list_unc_err_cnt), |
4601 | [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr" , 0, 0, |
4602 | CNTR_NORMAL, |
4603 | access_rx_rcv_fsm_encoding_err_cnt), |
4604 | [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr" , 0, 0, |
4605 | CNTR_NORMAL, |
4606 | access_rx_dma_flag_cor_err_cnt), |
4607 | [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr" , 0, 0, |
4608 | CNTR_NORMAL, |
4609 | access_rx_dma_flag_unc_err_cnt), |
4610 | [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr" , 0, 0, |
4611 | CNTR_NORMAL, |
4612 | access_rx_dc_sop_eop_parity_err_cnt), |
4613 | [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr" , 0, 0, |
4614 | CNTR_NORMAL, |
4615 | access_rx_rcv_csr_parity_err_cnt), |
4616 | [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr" , 0, 0, |
4617 | CNTR_NORMAL, |
4618 | access_rx_rcv_qp_map_table_cor_err_cnt), |
4619 | [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr" , 0, 0, |
4620 | CNTR_NORMAL, |
4621 | access_rx_rcv_qp_map_table_unc_err_cnt), |
4622 | [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr" , 0, 0, |
4623 | CNTR_NORMAL, |
4624 | access_rx_rcv_data_cor_err_cnt), |
4625 | [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr" , 0, 0, |
4626 | CNTR_NORMAL, |
4627 | access_rx_rcv_data_unc_err_cnt), |
4628 | [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr" , 0, 0, |
4629 | CNTR_NORMAL, |
4630 | access_rx_rcv_hdr_cor_err_cnt), |
4631 | [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr" , 0, 0, |
4632 | CNTR_NORMAL, |
4633 | access_rx_rcv_hdr_unc_err_cnt), |
4634 | [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr" , 0, 0, |
4635 | CNTR_NORMAL, |
4636 | access_rx_dc_intf_parity_err_cnt), |
4637 | [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr" , 0, 0, |
4638 | CNTR_NORMAL, |
4639 | access_rx_dma_csr_cor_err_cnt), |
4640 | /* SendPioErrStatus */ |
4641 | [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr" , 0, 0, |
4642 | CNTR_NORMAL, |
4643 | access_pio_pec_sop_head_parity_err_cnt), |
4644 | [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr" , 0, 0, |
4645 | CNTR_NORMAL, |
4646 | access_pio_pcc_sop_head_parity_err_cnt), |
4647 | [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr" , |
4648 | 0, 0, CNTR_NORMAL, |
4649 | access_pio_last_returned_cnt_parity_err_cnt), |
4650 | [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr" , 0, |
4651 | 0, CNTR_NORMAL, |
4652 | access_pio_current_free_cnt_parity_err_cnt), |
4653 | [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31" , 0, 0, |
4654 | CNTR_NORMAL, |
4655 | access_pio_reserved_31_err_cnt), |
4656 | [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30" , 0, 0, |
4657 | CNTR_NORMAL, |
4658 | access_pio_reserved_30_err_cnt), |
4659 | [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr" , 0, 0, |
4660 | CNTR_NORMAL, |
4661 | access_pio_ppmc_sop_len_err_cnt), |
4662 | [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr" , 0, 0, |
4663 | CNTR_NORMAL, |
4664 | access_pio_ppmc_bqc_mem_parity_err_cnt), |
4665 | [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr" , 0, 0, |
4666 | CNTR_NORMAL, |
4667 | access_pio_vl_fifo_parity_err_cnt), |
4668 | [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr" , 0, 0, |
4669 | CNTR_NORMAL, |
4670 | access_pio_vlf_sop_parity_err_cnt), |
4671 | [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr" , 0, 0, |
4672 | CNTR_NORMAL, |
4673 | access_pio_vlf_v1_len_parity_err_cnt), |
4674 | [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr" , 0, 0, |
4675 | CNTR_NORMAL, |
4676 | access_pio_block_qw_count_parity_err_cnt), |
4677 | [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr" , 0, 0, |
4678 | CNTR_NORMAL, |
4679 | access_pio_write_qw_valid_parity_err_cnt), |
4680 | [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr" , 0, 0, |
4681 | CNTR_NORMAL, |
4682 | access_pio_state_machine_err_cnt), |
4683 | [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr" , 0, 0, |
4684 | CNTR_NORMAL, |
4685 | access_pio_write_data_parity_err_cnt), |
4686 | [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr" , 0, 0, |
4687 | CNTR_NORMAL, |
4688 | access_pio_host_addr_mem_cor_err_cnt), |
4689 | [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr" , 0, 0, |
4690 | CNTR_NORMAL, |
4691 | access_pio_host_addr_mem_unc_err_cnt), |
4692 | [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr" , 0, 0, |
4693 | CNTR_NORMAL, |
4694 | access_pio_pkt_evict_sm_or_arb_sm_err_cnt), |
4695 | [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr" , 0, 0, |
4696 | CNTR_NORMAL, |
4697 | access_pio_init_sm_in_err_cnt), |
4698 | [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr" , 0, 0, |
4699 | CNTR_NORMAL, |
4700 | access_pio_ppmc_pbl_fifo_err_cnt), |
4701 | [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr" , 0, |
4702 | 0, CNTR_NORMAL, |
4703 | access_pio_credit_ret_fifo_parity_err_cnt), |
4704 | [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr" , 0, 0, |
4705 | CNTR_NORMAL, |
4706 | access_pio_v1_len_mem_bank1_cor_err_cnt), |
4707 | [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr" , 0, 0, |
4708 | CNTR_NORMAL, |
4709 | access_pio_v1_len_mem_bank0_cor_err_cnt), |
4710 | [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr" , 0, 0, |
4711 | CNTR_NORMAL, |
4712 | access_pio_v1_len_mem_bank1_unc_err_cnt), |
4713 | [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr" , 0, 0, |
4714 | CNTR_NORMAL, |
4715 | access_pio_v1_len_mem_bank0_unc_err_cnt), |
4716 | [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr" , 0, 0, |
4717 | CNTR_NORMAL, |
4718 | access_pio_sm_pkt_reset_parity_err_cnt), |
4719 | [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr" , 0, 0, |
4720 | CNTR_NORMAL, |
4721 | access_pio_pkt_evict_fifo_parity_err_cnt), |
4722 | [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM( |
4723 | "PioSbrdctrlCrrelFifoParityErr" , 0, 0, |
4724 | CNTR_NORMAL, |
4725 | access_pio_sbrdctrl_crrel_fifo_parity_err_cnt), |
4726 | [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr" , 0, 0, |
4727 | CNTR_NORMAL, |
4728 | access_pio_sbrdctl_crrel_parity_err_cnt), |
4729 | [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr" , 0, 0, |
4730 | CNTR_NORMAL, |
4731 | access_pio_pec_fifo_parity_err_cnt), |
4732 | [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr" , 0, 0, |
4733 | CNTR_NORMAL, |
4734 | access_pio_pcc_fifo_parity_err_cnt), |
4735 | [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err" , 0, 0, |
4736 | CNTR_NORMAL, |
4737 | access_pio_sb_mem_fifo1_err_cnt), |
4738 | [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err" , 0, 0, |
4739 | CNTR_NORMAL, |
4740 | access_pio_sb_mem_fifo0_err_cnt), |
4741 | [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr" , 0, 0, |
4742 | CNTR_NORMAL, |
4743 | access_pio_csr_parity_err_cnt), |
4744 | [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr" , 0, 0, |
4745 | CNTR_NORMAL, |
4746 | access_pio_write_addr_parity_err_cnt), |
4747 | [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr" , 0, 0, |
4748 | CNTR_NORMAL, |
4749 | access_pio_write_bad_ctxt_err_cnt), |
4750 | /* SendDmaErrStatus */ |
4751 | [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr" , 0, |
4752 | 0, CNTR_NORMAL, |
4753 | access_sdma_pcie_req_tracking_cor_err_cnt), |
4754 | [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr" , 0, |
4755 | 0, CNTR_NORMAL, |
4756 | access_sdma_pcie_req_tracking_unc_err_cnt), |
4757 | [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr" , 0, 0, |
4758 | CNTR_NORMAL, |
4759 | access_sdma_csr_parity_err_cnt), |
4760 | [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr" , 0, 0, |
4761 | CNTR_NORMAL, |
4762 | access_sdma_rpy_tag_err_cnt), |
4763 | /* SendEgressErrStatus */ |
4764 | [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr" , 0, 0, |
4765 | CNTR_NORMAL, |
4766 | access_tx_read_pio_memory_csr_unc_err_cnt), |
4767 | [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr" , 0, |
4768 | 0, CNTR_NORMAL, |
4769 | access_tx_read_sdma_memory_csr_err_cnt), |
4770 | [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr" , 0, 0, |
4771 | CNTR_NORMAL, |
4772 | access_tx_egress_fifo_cor_err_cnt), |
4773 | [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr" , 0, 0, |
4774 | CNTR_NORMAL, |
4775 | access_tx_read_pio_memory_cor_err_cnt), |
4776 | [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr" , 0, 0, |
4777 | CNTR_NORMAL, |
4778 | access_tx_read_sdma_memory_cor_err_cnt), |
4779 | [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr" , 0, 0, |
4780 | CNTR_NORMAL, |
4781 | access_tx_sb_hdr_cor_err_cnt), |
4782 | [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr" , 0, 0, |
4783 | CNTR_NORMAL, |
4784 | access_tx_credit_overrun_err_cnt), |
4785 | [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr" , 0, 0, |
4786 | CNTR_NORMAL, |
4787 | access_tx_launch_fifo8_cor_err_cnt), |
4788 | [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr" , 0, 0, |
4789 | CNTR_NORMAL, |
4790 | access_tx_launch_fifo7_cor_err_cnt), |
4791 | [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr" , 0, 0, |
4792 | CNTR_NORMAL, |
4793 | access_tx_launch_fifo6_cor_err_cnt), |
4794 | [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr" , 0, 0, |
4795 | CNTR_NORMAL, |
4796 | access_tx_launch_fifo5_cor_err_cnt), |
4797 | [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr" , 0, 0, |
4798 | CNTR_NORMAL, |
4799 | access_tx_launch_fifo4_cor_err_cnt), |
4800 | [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr" , 0, 0, |
4801 | CNTR_NORMAL, |
4802 | access_tx_launch_fifo3_cor_err_cnt), |
4803 | [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr" , 0, 0, |
4804 | CNTR_NORMAL, |
4805 | access_tx_launch_fifo2_cor_err_cnt), |
4806 | [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr" , 0, 0, |
4807 | CNTR_NORMAL, |
4808 | access_tx_launch_fifo1_cor_err_cnt), |
4809 | [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr" , 0, 0, |
4810 | CNTR_NORMAL, |
4811 | access_tx_launch_fifo0_cor_err_cnt), |
4812 | [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr" , 0, 0, |
4813 | CNTR_NORMAL, |
4814 | access_tx_credit_return_vl_err_cnt), |
4815 | [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr" , 0, 0, |
4816 | CNTR_NORMAL, |
4817 | access_tx_hcrc_insertion_err_cnt), |
4818 | [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr" , 0, 0, |
4819 | CNTR_NORMAL, |
4820 | access_tx_egress_fifo_unc_err_cnt), |
4821 | [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr" , 0, 0, |
4822 | CNTR_NORMAL, |
4823 | access_tx_read_pio_memory_unc_err_cnt), |
4824 | [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr" , 0, 0, |
4825 | CNTR_NORMAL, |
4826 | access_tx_read_sdma_memory_unc_err_cnt), |
4827 | [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr" , 0, 0, |
4828 | CNTR_NORMAL, |
4829 | access_tx_sb_hdr_unc_err_cnt), |
4830 | [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr" , 0, 0, |
4831 | CNTR_NORMAL, |
4832 | access_tx_credit_return_partiy_err_cnt), |
4833 | [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr" , |
4834 | 0, 0, CNTR_NORMAL, |
4835 | access_tx_launch_fifo8_unc_or_parity_err_cnt), |
4836 | [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr" , |
4837 | 0, 0, CNTR_NORMAL, |
4838 | access_tx_launch_fifo7_unc_or_parity_err_cnt), |
4839 | [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr" , |
4840 | 0, 0, CNTR_NORMAL, |
4841 | access_tx_launch_fifo6_unc_or_parity_err_cnt), |
4842 | [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr" , |
4843 | 0, 0, CNTR_NORMAL, |
4844 | access_tx_launch_fifo5_unc_or_parity_err_cnt), |
4845 | [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr" , |
4846 | 0, 0, CNTR_NORMAL, |
4847 | access_tx_launch_fifo4_unc_or_parity_err_cnt), |
4848 | [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr" , |
4849 | 0, 0, CNTR_NORMAL, |
4850 | access_tx_launch_fifo3_unc_or_parity_err_cnt), |
4851 | [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr" , |
4852 | 0, 0, CNTR_NORMAL, |
4853 | access_tx_launch_fifo2_unc_or_parity_err_cnt), |
4854 | [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr" , |
4855 | 0, 0, CNTR_NORMAL, |
4856 | access_tx_launch_fifo1_unc_or_parity_err_cnt), |
4857 | [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr" , |
4858 | 0, 0, CNTR_NORMAL, |
4859 | access_tx_launch_fifo0_unc_or_parity_err_cnt), |
4860 | [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr" , |
4861 | 0, 0, CNTR_NORMAL, |
4862 | access_tx_sdma15_disallowed_packet_err_cnt), |
4863 | [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr" , |
4864 | 0, 0, CNTR_NORMAL, |
4865 | access_tx_sdma14_disallowed_packet_err_cnt), |
4866 | [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr" , |
4867 | 0, 0, CNTR_NORMAL, |
4868 | access_tx_sdma13_disallowed_packet_err_cnt), |
4869 | [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr" , |
4870 | 0, 0, CNTR_NORMAL, |
4871 | access_tx_sdma12_disallowed_packet_err_cnt), |
4872 | [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr" , |
4873 | 0, 0, CNTR_NORMAL, |
4874 | access_tx_sdma11_disallowed_packet_err_cnt), |
4875 | [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr" , |
4876 | 0, 0, CNTR_NORMAL, |
4877 | access_tx_sdma10_disallowed_packet_err_cnt), |
4878 | [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr" , |
4879 | 0, 0, CNTR_NORMAL, |
4880 | access_tx_sdma9_disallowed_packet_err_cnt), |
4881 | [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr" , |
4882 | 0, 0, CNTR_NORMAL, |
4883 | access_tx_sdma8_disallowed_packet_err_cnt), |
4884 | [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr" , |
4885 | 0, 0, CNTR_NORMAL, |
4886 | access_tx_sdma7_disallowed_packet_err_cnt), |
4887 | [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr" , |
4888 | 0, 0, CNTR_NORMAL, |
4889 | access_tx_sdma6_disallowed_packet_err_cnt), |
4890 | [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr" , |
4891 | 0, 0, CNTR_NORMAL, |
4892 | access_tx_sdma5_disallowed_packet_err_cnt), |
4893 | [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr" , |
4894 | 0, 0, CNTR_NORMAL, |
4895 | access_tx_sdma4_disallowed_packet_err_cnt), |
4896 | [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr" , |
4897 | 0, 0, CNTR_NORMAL, |
4898 | access_tx_sdma3_disallowed_packet_err_cnt), |
4899 | [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr" , |
4900 | 0, 0, CNTR_NORMAL, |
4901 | access_tx_sdma2_disallowed_packet_err_cnt), |
4902 | [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr" , |
4903 | 0, 0, CNTR_NORMAL, |
4904 | access_tx_sdma1_disallowed_packet_err_cnt), |
4905 | [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr" , |
4906 | 0, 0, CNTR_NORMAL, |
4907 | access_tx_sdma0_disallowed_packet_err_cnt), |
4908 | [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr" , 0, 0, |
4909 | CNTR_NORMAL, |
4910 | access_tx_config_parity_err_cnt), |
4911 | [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr" , 0, 0, |
4912 | CNTR_NORMAL, |
4913 | access_tx_sbrd_ctl_csr_parity_err_cnt), |
4914 | [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr" , 0, 0, |
4915 | CNTR_NORMAL, |
4916 | access_tx_launch_csr_parity_err_cnt), |
4917 | [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr" , 0, 0, |
4918 | CNTR_NORMAL, |
4919 | access_tx_illegal_vl_err_cnt), |
4920 | [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM( |
4921 | "TxSbrdCtlStateMachineParityErr" , 0, 0, |
4922 | CNTR_NORMAL, |
4923 | access_tx_sbrd_ctl_state_machine_parity_err_cnt), |
4924 | [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10" , 0, 0, |
4925 | CNTR_NORMAL, |
4926 | access_egress_reserved_10_err_cnt), |
4927 | [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9" , 0, 0, |
4928 | CNTR_NORMAL, |
4929 | access_egress_reserved_9_err_cnt), |
4930 | [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr" , |
4931 | 0, 0, CNTR_NORMAL, |
4932 | access_tx_sdma_launch_intf_parity_err_cnt), |
4933 | [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr" , 0, 0, |
4934 | CNTR_NORMAL, |
4935 | access_tx_pio_launch_intf_parity_err_cnt), |
4936 | [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6" , 0, 0, |
4937 | CNTR_NORMAL, |
4938 | access_egress_reserved_6_err_cnt), |
4939 | [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr" , 0, 0, |
4940 | CNTR_NORMAL, |
4941 | access_tx_incorrect_link_state_err_cnt), |
4942 | [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr" , 0, 0, |
4943 | CNTR_NORMAL, |
4944 | access_tx_linkdown_err_cnt), |
4945 | [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM( |
4946 | "EgressFifoUnderrunOrParityErr" , 0, 0, |
4947 | CNTR_NORMAL, |
4948 | access_tx_egress_fifi_underrun_or_parity_err_cnt), |
4949 | [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2" , 0, 0, |
4950 | CNTR_NORMAL, |
4951 | access_egress_reserved_2_err_cnt), |
4952 | [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr" , 0, 0, |
4953 | CNTR_NORMAL, |
4954 | access_tx_pkt_integrity_mem_unc_err_cnt), |
4955 | [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr" , 0, 0, |
4956 | CNTR_NORMAL, |
4957 | access_tx_pkt_integrity_mem_cor_err_cnt), |
4958 | /* SendErrStatus */ |
4959 | [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr" , 0, 0, |
4960 | CNTR_NORMAL, |
4961 | access_send_csr_write_bad_addr_err_cnt), |
4962 | [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr" , 0, 0, |
4963 | CNTR_NORMAL, |
4964 | access_send_csr_read_bad_addr_err_cnt), |
4965 | [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr" , 0, 0, |
4966 | CNTR_NORMAL, |
4967 | access_send_csr_parity_cnt), |
4968 | /* SendCtxtErrStatus */ |
4969 | [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr" , 0, 0, |
4970 | CNTR_NORMAL, |
4971 | access_pio_write_out_of_bounds_err_cnt), |
4972 | [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr" , 0, 0, |
4973 | CNTR_NORMAL, |
4974 | access_pio_write_overflow_err_cnt), |
4975 | [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr" , |
4976 | 0, 0, CNTR_NORMAL, |
4977 | access_pio_write_crosses_boundary_err_cnt), |
4978 | [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr" , 0, 0, |
4979 | CNTR_NORMAL, |
4980 | access_pio_disallowed_packet_err_cnt), |
4981 | [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr" , 0, 0, |
4982 | CNTR_NORMAL, |
4983 | access_pio_inconsistent_sop_err_cnt), |
4984 | /* SendDmaEngErrStatus */ |
4985 | [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr" , |
4986 | 0, 0, CNTR_NORMAL, |
4987 | access_sdma_header_request_fifo_cor_err_cnt), |
4988 | [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr" , 0, 0, |
4989 | CNTR_NORMAL, |
4990 | access_sdma_header_storage_cor_err_cnt), |
4991 | [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr" , 0, 0, |
4992 | CNTR_NORMAL, |
4993 | access_sdma_packet_tracking_cor_err_cnt), |
4994 | [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr" , 0, 0, |
4995 | CNTR_NORMAL, |
4996 | access_sdma_assembly_cor_err_cnt), |
4997 | [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr" , 0, 0, |
4998 | CNTR_NORMAL, |
4999 | access_sdma_desc_table_cor_err_cnt), |
5000 | [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr" , |
5001 | 0, 0, CNTR_NORMAL, |
5002 | access_sdma_header_request_fifo_unc_err_cnt), |
5003 | [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr" , 0, 0, |
5004 | CNTR_NORMAL, |
5005 | access_sdma_header_storage_unc_err_cnt), |
5006 | [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr" , 0, 0, |
5007 | CNTR_NORMAL, |
5008 | access_sdma_packet_tracking_unc_err_cnt), |
5009 | [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr" , 0, 0, |
5010 | CNTR_NORMAL, |
5011 | access_sdma_assembly_unc_err_cnt), |
5012 | [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr" , 0, 0, |
5013 | CNTR_NORMAL, |
5014 | access_sdma_desc_table_unc_err_cnt), |
5015 | [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr" , 0, 0, |
5016 | CNTR_NORMAL, |
5017 | access_sdma_timeout_err_cnt), |
5018 | [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr" , 0, 0, |
5019 | CNTR_NORMAL, |
5020 | access_sdma_header_length_err_cnt), |
5021 | [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr" , 0, 0, |
5022 | CNTR_NORMAL, |
5023 | access_sdma_header_address_err_cnt), |
5024 | [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr" , 0, 0, |
5025 | CNTR_NORMAL, |
5026 | access_sdma_header_select_err_cnt), |
5027 | [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9" , 0, 0, |
5028 | CNTR_NORMAL, |
5029 | access_sdma_reserved_9_err_cnt), |
5030 | [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr" , 0, 0, |
5031 | CNTR_NORMAL, |
5032 | access_sdma_packet_desc_overflow_err_cnt), |
5033 | [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr" , 0, 0, |
5034 | CNTR_NORMAL, |
5035 | access_sdma_length_mismatch_err_cnt), |
5036 | [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr" , 0, 0, |
5037 | CNTR_NORMAL, |
5038 | access_sdma_halt_err_cnt), |
5039 | [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr" , 0, 0, |
5040 | CNTR_NORMAL, |
5041 | access_sdma_mem_read_err_cnt), |
5042 | [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr" , 0, 0, |
5043 | CNTR_NORMAL, |
5044 | access_sdma_first_desc_err_cnt), |
5045 | [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr" , 0, 0, |
5046 | CNTR_NORMAL, |
5047 | access_sdma_tail_out_of_bounds_err_cnt), |
5048 | [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr" , 0, 0, |
5049 | CNTR_NORMAL, |
5050 | access_sdma_too_long_err_cnt), |
5051 | [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr" , 0, 0, |
5052 | CNTR_NORMAL, |
5053 | access_sdma_gen_mismatch_err_cnt), |
5054 | [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr" , 0, 0, |
5055 | CNTR_NORMAL, |
5056 | access_sdma_wrong_dw_err_cnt), |
5057 | }; |
5058 | |
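/*
 * Per-port counters. The TXE32/TXE64 and RXE64 entries read send and
 * receive hardware counter CSRs directly; CNTR_VL entries are replicated
 * per virtual lane, and the remaining entries are software counters
 * reached through their access_* callbacks.
 */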
5059 | static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { |
5060 | [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT, |
5061 | CNTR_NORMAL), |
5062 | [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT, |
5063 | CNTR_NORMAL), |
5064 | [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT, |
5065 | CNTR_NORMAL), |
5066 | [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT, |
5067 | CNTR_NORMAL), |
5068 | [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT, |
5069 | CNTR_NORMAL), |
5070 | [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT, |
5071 | CNTR_NORMAL), |
5072 | [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT, |
5073 | CNTR_NORMAL), |
5074 | [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL), |
5075 | [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL), |
5076 | [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH), |
5077 | [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT, |
5078 | CNTR_SYNTH | CNTR_VL), |
5079 | [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT, |
5080 | CNTR_SYNTH | CNTR_VL), |
5081 | [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT, |
5082 | CNTR_SYNTH | CNTR_VL), |
5083 | [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL), |
5084 | [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL), |
5085 | [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown" , 0, 0, CNTR_SYNTH | CNTR_32BIT, |
5086 | access_sw_link_dn_cnt), |
5087 | [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp" , 0, 0, CNTR_SYNTH | CNTR_32BIT, |
5088 | access_sw_link_up_cnt), |
5089 | [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame" , 0, 0, CNTR_NORMAL, |
5090 | access_sw_unknown_frame_cnt), |
5091 | [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd" , 0, 0, CNTR_SYNTH | CNTR_32BIT, |
5092 | access_sw_xmit_discards), |
5093 | [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl" , 0, 0, |
5094 | CNTR_SYNTH | CNTR_32BIT | CNTR_VL, |
5095 | access_sw_xmit_discards), |
5096 | [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr" , 0, 0, CNTR_SYNTH, |
5097 | access_xmit_constraint_errs), |
5098 | [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr" , 0, 0, CNTR_SYNTH, |
5099 | access_rcv_constraint_errs), |
5100 | [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts), |
5101 | [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends), |
5102 | [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks), |
5103 | [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks), |
5104 | [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts), |
5105 | [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops), |
5106 | [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait), |
5107 | [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak), |
5108 | [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq), |
5109 | [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), |
5110 | [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), |
5111 | [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), |
5112 | [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), |
5113 | [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks" , 0, 0, CNTR_NORMAL, |
5114 | access_sw_cpu_rc_acks), |
5115 | [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks" , 0, 0, CNTR_NORMAL, |
5116 | access_sw_cpu_rc_qacks), |
5117 | [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp" , 0, 0, CNTR_NORMAL, |
5118 | access_sw_cpu_rc_delayed_comp), |
5119 | [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1), |
5120 | [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3), |
5121 | [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5), |
5122 | [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7), |
5123 | [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9), |
5124 | [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11), |
5125 | [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13), |
5126 | [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15), |
5127 | [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17), |
5128 | [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19), |
5129 | [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21), |
5130 | [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23), |
5131 | [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25), |
5132 | [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27), |
5133 | [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29), |
5134 | [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31), |
5135 | [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33), |
5136 | [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35), |
5137 | [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37), |
5138 | [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39), |
5139 | [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41), |
5140 | [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43), |
5141 | [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45), |
5142 | [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47), |
5143 | [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49), |
5144 | [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51), |
5145 | [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53), |
5146 | [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55), |
5147 | [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57), |
5148 | [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59), |
5149 | [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61), |
5150 | [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63), |
5151 | [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65), |
5152 | [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67), |
5153 | [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69), |
5154 | [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71), |
5155 | [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73), |
5156 | [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75), |
5157 | [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77), |
5158 | [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79), |
5159 | [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81), |
5160 | [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83), |
5161 | [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85), |
5162 | [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87), |
5163 | [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89), |
5164 | [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91), |
5165 | [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93), |
5166 | [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95), |
5167 | [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97), |
5168 | [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99), |
5169 | [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101), |
5170 | [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103), |
5171 | [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105), |
5172 | [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107), |
5173 | [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109), |
5174 | [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111), |
5175 | [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113), |
5176 | [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115), |
5177 | [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117), |
5178 | [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119), |
5179 | [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121), |
5180 | [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123), |
5181 | [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125), |
5182 | [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127), |
5183 | [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129), |
5184 | [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131), |
5185 | [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133), |
5186 | [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135), |
5187 | [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137), |
5188 | [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139), |
5189 | [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141), |
5190 | [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143), |
5191 | [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145), |
5192 | [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147), |
5193 | [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149), |
5194 | [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151), |
5195 | [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153), |
5196 | [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155), |
5197 | [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157), |
5198 | [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159), |
5199 | }; |
5200 | |
5201 | /* ======================================================================== */ |
5202 | |
/* return true if this is chip revision A */
5204 | int is_ax(struct hfi1_devdata *dd) |
5205 | { |
5206 | u8 chip_rev_minor = |
5207 | dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT |
5208 | & CCE_REVISION_CHIP_REV_MINOR_MASK; |
5209 | return (chip_rev_minor & 0xf0) == 0; |
5210 | } |
5211 | |
/* return true if this is chip revision B */
5213 | int is_bx(struct hfi1_devdata *dd) |
5214 | { |
5215 | u8 chip_rev_minor = |
5216 | dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT |
5217 | & CCE_REVISION_CHIP_REV_MINOR_MASK; |
5218 | return (chip_rev_minor & 0xF0) == 0x10; |
5219 | } |
5220 | |
/* return true if the kernel receive urgent interrupt is masked for rcd */
5222 | bool is_urg_masked(struct hfi1_ctxtdata *rcd) |
5223 | { |
5224 | u64 mask; |
5225 | u32 is = IS_RCVURGENT_START + rcd->ctxt; |
5226 | u8 bit = is % 64; |
5227 | |
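	/* each 64-bit CCE_INT_MASK register covers 64 interrupt sources */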
	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5229 | return !(mask & BIT_ULL(bit)); |
5230 | } |
5231 | |
5232 | /* |
 * Append string s to buffer buf. Arguments curp and lenp point to the
 * current position and remaining length, respectively.
5235 | * |
5236 | * return 0 on success, 1 on out of room |
5237 | */ |
5238 | static int append_str(char *buf, char **curp, int *lenp, const char *s) |
5239 | { |
5240 | char *p = *curp; |
5241 | int len = *lenp; |
5242 | int result = 0; /* success */ |
5243 | char c; |
5244 | |
	/* add a comma, unless this is the first entry in the buffer */
5246 | if (p != buf) { |
5247 | if (len == 0) { |
5248 | result = 1; /* out of room */ |
5249 | goto done; |
5250 | } |
5251 | *p++ = ','; |
5252 | len--; |
5253 | } |
5254 | |
5255 | /* copy the string */ |
5256 | while ((c = *s++) != 0) { |
5257 | if (len == 0) { |
5258 | result = 1; /* out of room */ |
5259 | goto done; |
5260 | } |
5261 | *p++ = c; |
5262 | len--; |
5263 | } |
5264 | |
5265 | done: |
5266 | /* write return values */ |
5267 | *curp = p; |
5268 | *lenp = len; |
5269 | |
5270 | return result; |
5271 | } |
5272 | |
5273 | /* |
5274 | * Using the given flag table, print a comma separated string into |
5275 | * the buffer. End in '*' if the buffer is too short. |
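 *
 * Example (hypothetical table): entries "FooErr" for bit 0 and "BarErr"
 * for bit 1 with flags == 0x7 yield "FooErr,BarErr,bits 0x4".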
5276 | */ |
5277 | static char *flag_string(char *buf, int buf_len, u64 flags, |
5278 | struct flag_table *table, int table_size) |
5279 | { |
	char extra[32];
5281 | char *p = buf; |
5282 | int len = buf_len; |
5283 | int no_room = 0; |
5284 | int i; |
5285 | |
	/* make sure there is room for at least 2 chars so we can form "*" */
5287 | if (len < 2) |
5288 | return "" ; |
5289 | |
5290 | len--; /* leave room for a nul */ |
5291 | for (i = 0; i < table_size; i++) { |
5292 | if (flags & table[i].flag) { |
			no_room = append_str(buf, &p, &len, table[i].str);
5294 | if (no_room) |
5295 | break; |
5296 | flags &= ~table[i].flag; |
5297 | } |
5298 | } |
5299 | |
5300 | /* any undocumented bits left? */ |
5301 | if (!no_room && flags) { |
		snprintf(extra, sizeof(extra), "bits 0x%llx" , flags);
		no_room = append_str(buf, &p, &len, extra);
5304 | } |
5305 | |
5306 | /* add * if ran out of room */ |
5307 | if (no_room) { |
5308 | /* may need to back up to add space for a '*' */ |
5309 | if (len == 0) |
5310 | --p; |
5311 | *p++ = '*'; |
5312 | } |
5313 | |
5314 | /* add final nul - space already allocated above */ |
5315 | *p = 0; |
5316 | return buf; |
5317 | } |
5318 | |
5319 | /* first 8 CCE error interrupt source names */ |
5320 | static const char * const cce_misc_names[] = { |
5321 | "CceErrInt" , /* 0 */ |
5322 | "RxeErrInt" , /* 1 */ |
5323 | "MiscErrInt" , /* 2 */ |
5324 | "Reserved3" , /* 3 */ |
5325 | "PioErrInt" , /* 4 */ |
5326 | "SDmaErrInt" , /* 5 */ |
5327 | "EgressErrInt" , /* 6 */ |
5328 | "TxeErrInt" /* 7 */ |
5329 | }; |
5330 | |
5331 | /* |
5332 | * Return the miscellaneous error interrupt name. |
5333 | */ |
5334 | static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) |
5335 | { |
5336 | if (source < ARRAY_SIZE(cce_misc_names)) |
5337 | strscpy_pad(buf, cce_misc_names[source], bsize); |
5338 | else |
		snprintf(buf, bsize, "Reserved%u" ,
5340 | source + IS_GENERAL_ERR_START); |
5341 | |
5342 | return buf; |
5343 | } |
5344 | |
5345 | /* |
5346 | * Return the SDMA engine error interrupt name. |
5347 | */ |
5348 | static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source) |
5349 | { |
5350 | snprintf(buf, size: bsize, fmt: "SDmaEngErrInt%u" , source); |
5351 | return buf; |
5352 | } |
5353 | |
5354 | /* |
5355 | * Return the send context error interrupt name. |
5356 | */ |
5357 | static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source) |
5358 | { |
5359 | snprintf(buf, size: bsize, fmt: "SendCtxtErrInt%u" , source); |
5360 | return buf; |
5361 | } |
5362 | |
5363 | static const char * const various_names[] = { |
5364 | "PbcInt" , |
5365 | "GpioAssertInt" , |
5366 | "Qsfp1Int" , |
5367 | "Qsfp2Int" , |
5368 | "TCritInt" |
5369 | }; |
5370 | |
5371 | /* |
5372 | * Return the various interrupt name. |
5373 | */ |
5374 | static char *is_various_name(char *buf, size_t bsize, unsigned int source) |
5375 | { |
5376 | if (source < ARRAY_SIZE(various_names)) |
5377 | strscpy_pad(buf, various_names[source], bsize); |
5378 | else |
5379 | snprintf(buf, size: bsize, fmt: "Reserved%u" , source + IS_VARIOUS_START); |
5380 | return buf; |
5381 | } |
5382 | |
5383 | /* |
5384 | * Return the DC interrupt name. |
5385 | */ |
5386 | static char *is_dc_name(char *buf, size_t bsize, unsigned int source) |
5387 | { |
5388 | static const char * const dc_int_names[] = { |
5389 | "common" , |
5390 | "lcb" , |
5391 | "8051" , |
5392 | "lbm" /* local block merge */ |
5393 | }; |
5394 | |
5395 | if (source < ARRAY_SIZE(dc_int_names)) |
5396 | snprintf(buf, size: bsize, fmt: "dc_%s_int" , dc_int_names[source]); |
5397 | else |
5398 | snprintf(buf, size: bsize, fmt: "DCInt%u" , source); |
5399 | return buf; |
5400 | } |
5401 | |
5402 | static const char * const sdma_int_names[] = { |
5403 | "SDmaInt" , |
5404 | "SdmaIdleInt" , |
5405 | "SdmaProgressInt" , |
5406 | }; |
5407 | |
5408 | /* |
5409 | * Return the SDMA engine interrupt name. |
5410 | */ |
5411 | static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source) |
5412 | { |
5413 | /* what interrupt */ |
5414 | unsigned int what = source / TXE_NUM_SDMA_ENGINES; |
5415 | /* which engine */ |
5416 | unsigned int which = source % TXE_NUM_SDMA_ENGINES; |
5417 | |
5418 | if (likely(what < 3)) |
5419 | snprintf(buf, size: bsize, fmt: "%s%u" , sdma_int_names[what], which); |
5420 | else |
5421 | snprintf(buf, size: bsize, fmt: "Invalid SDMA interrupt %u" , source); |
5422 | return buf; |
5423 | } |
5424 | |
5425 | /* |
5426 | * Return the receive available interrupt name. |
5427 | */ |
5428 | static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source) |
5429 | { |
5430 | snprintf(buf, size: bsize, fmt: "RcvAvailInt%u" , source); |
5431 | return buf; |
5432 | } |
5433 | |
5434 | /* |
5435 | * Return the receive urgent interrupt name. |
5436 | */ |
5437 | static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source) |
5438 | { |
5439 | snprintf(buf, size: bsize, fmt: "RcvUrgentInt%u" , source); |
5440 | return buf; |
5441 | } |
5442 | |
5443 | /* |
5444 | * Return the send credit interrupt name. |
5445 | */ |
5446 | static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source) |
5447 | { |
5448 | snprintf(buf, size: bsize, fmt: "SendCreditInt%u" , source); |
5449 | return buf; |
5450 | } |
5451 | |
5452 | /* |
5453 | * Return the reserved interrupt name. |
5454 | */ |
5455 | static char *is_reserved_name(char *buf, size_t bsize, unsigned int source) |
5456 | { |
5457 | snprintf(buf, size: bsize, fmt: "Reserved%u" , source + IS_RESERVED_START); |
5458 | return buf; |
5459 | } |
5460 | |
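/*
 * flag_string() wrappers: one per error-status register, each bound to
 * that register's flag table.
 */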
5461 | static char *cce_err_status_string(char *buf, int buf_len, u64 flags) |
5462 | { |
5463 | return flag_string(buf, buf_len, flags, |
			   cce_err_status_flags,
5465 | ARRAY_SIZE(cce_err_status_flags)); |
5466 | } |
5467 | |
5468 | static char *rxe_err_status_string(char *buf, int buf_len, u64 flags) |
5469 | { |
5470 | return flag_string(buf, buf_len, flags, |
			   rxe_err_status_flags,
5472 | ARRAY_SIZE(rxe_err_status_flags)); |
5473 | } |
5474 | |
5475 | static char *misc_err_status_string(char *buf, int buf_len, u64 flags) |
5476 | { |
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
5478 | ARRAY_SIZE(misc_err_status_flags)); |
5479 | } |
5480 | |
5481 | static char *pio_err_status_string(char *buf, int buf_len, u64 flags) |
5482 | { |
5483 | return flag_string(buf, buf_len, flags, |
			   pio_err_status_flags,
5485 | ARRAY_SIZE(pio_err_status_flags)); |
5486 | } |
5487 | |
5488 | static char *sdma_err_status_string(char *buf, int buf_len, u64 flags) |
5489 | { |
5490 | return flag_string(buf, buf_len, flags, |
			   sdma_err_status_flags,
5492 | ARRAY_SIZE(sdma_err_status_flags)); |
5493 | } |
5494 | |
5495 | static char *egress_err_status_string(char *buf, int buf_len, u64 flags) |
5496 | { |
5497 | return flag_string(buf, buf_len, flags, |
			   egress_err_status_flags,
5499 | ARRAY_SIZE(egress_err_status_flags)); |
5500 | } |
5501 | |
5502 | static char *egress_err_info_string(char *buf, int buf_len, u64 flags) |
5503 | { |
5504 | return flag_string(buf, buf_len, flags, |
			   egress_err_info_flags,
5506 | ARRAY_SIZE(egress_err_info_flags)); |
5507 | } |
5508 | |
5509 | static char *send_err_status_string(char *buf, int buf_len, u64 flags) |
5510 | { |
5511 | return flag_string(buf, buf_len, flags, |
			   send_err_status_flags,
5513 | ARRAY_SIZE(send_err_status_flags)); |
5514 | } |
5515 | |
5516 | static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5517 | { |
5518 | char buf[96]; |
5519 | int i = 0; |
5520 | |
5521 | /* |
	 * For most of these errors, there is nothing that can be done except
5523 | * report or record it. |
5524 | */ |
5525 | dd_dev_info(dd, "CCE Error: %s\n" , |
5526 | cce_err_status_string(buf, sizeof(buf), reg)); |
5527 | |
5528 | if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) && |
5529 | is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { |
		/*
		 * This error requires a manual drop into SPC freeze mode
		 * and then a fix up.
		 */
		start_freeze_handling(dd->pport, FREEZE_SELF);
5533 | } |
5534 | |
5535 | for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) { |
5536 | if (reg & (1ull << i)) { |
5537 | incr_cntr64(cntr: &dd->cce_err_status_cnt[i]); |
5538 | /* maintain a counter over all cce_err_status errors */ |
5539 | incr_cntr64(cntr: &dd->sw_cce_err_status_aggregate); |
5540 | } |
5541 | } |
5542 | } |
5543 | |
5544 | /* |
5545 | * Check counters for receive errors that do not have an interrupt |
5546 | * associated with them. |
5547 | */ |
#define RCVERR_CHECK_TIME 10 /* seconds */
5549 | static void update_rcverr_timer(struct timer_list *t) |
5550 | { |
5551 | struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); |
5552 | struct hfi1_pportdata *ppd = dd->pport; |
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5554 | |
5555 | if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && |
5556 | ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { |
5557 | dd_dev_info(dd, "%s: PortErrorAction bounce\n" , __func__); |
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
5562 | } |
5563 | dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; |
5564 | |
5565 | mod_timer(timer: &dd->rcverr_timer, expires: jiffies + HZ * RCVERR_CHECK_TIME); |
5566 | } |
5567 | |
5568 | static int init_rcverr(struct hfi1_devdata *dd) |
5569 | { |
5570 | timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); |
5571 | /* Assume the hardware counter has been reset */ |
5572 | dd->rcv_ovfl_cnt = 0; |
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5574 | } |
5575 | |
5576 | static void free_rcverr(struct hfi1_devdata *dd) |
5577 | { |
5578 | if (dd->rcverr_timer.function) |
		del_timer_sync(&dd->rcverr_timer);
5580 | } |
5581 | |
5582 | static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5583 | { |
5584 | char buf[96]; |
5585 | int i = 0; |
5586 | |
5587 | dd_dev_info(dd, "Receive Error: %s\n" , |
5588 | rxe_err_status_string(buf, sizeof(buf), reg)); |
5589 | |
5590 | if (reg & ALL_RXE_FREEZE_ERR) { |
5591 | int flags = 0; |
5592 | |
5593 | /* |
5594 | * Freeze mode recovery is disabled for the errors |
5595 | * in RXE_FREEZE_ABORT_MASK |
5596 | */ |
5597 | if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) |
5598 | flags = FREEZE_ABORT; |
5599 | |
		start_freeze_handling(dd->pport, flags);
5601 | } |
5602 | |
5603 | for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) { |
5604 | if (reg & (1ull << i)) |
5605 | incr_cntr64(cntr: &dd->rcv_err_status_cnt[i]); |
5606 | } |
5607 | } |
5608 | |
5609 | static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5610 | { |
5611 | char buf[96]; |
5612 | int i = 0; |
5613 | |
5614 | dd_dev_info(dd, "Misc Error: %s" , |
5615 | misc_err_status_string(buf, sizeof(buf), reg)); |
5616 | for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) { |
5617 | if (reg & (1ull << i)) |
5618 | incr_cntr64(cntr: &dd->misc_err_status_cnt[i]); |
5619 | } |
5620 | } |
5621 | |
5622 | static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5623 | { |
5624 | char buf[96]; |
5625 | int i = 0; |
5626 | |
dd_dev_info(dd, "PIO Error: %s\n",
pio_err_status_string(buf, sizeof(buf), reg));

if (reg & ALL_PIO_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);

for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5636 | } |
5637 | } |
5638 | |
5639 | static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5640 | { |
5641 | char buf[96]; |
5642 | int i = 0; |
5643 | |
dd_dev_info(dd, "SDMA Error: %s\n",
sdma_err_status_string(buf, sizeof(buf), reg));

if (reg & ALL_SDMA_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);

for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5653 | } |
5654 | } |
5655 | |
5656 | static inline void __count_port_discards(struct hfi1_pportdata *ppd) |
5657 | { |
incr_cntr64(&ppd->port_xmit_discards);
5659 | } |
5660 | |
5661 | static void count_port_inactive(struct hfi1_devdata *dd) |
5662 | { |
__count_port_discards(dd->pport);
5664 | } |
5665 | |
5666 | /* |
5667 | * We have had a "disallowed packet" error during egress. Determine the |
5668 | * integrity check which failed, and update relevant error counter, etc. |
5669 | * |
5670 | * Note that the SEND_EGRESS_ERR_INFO register has only a single |
5671 | * bit of state per integrity check, and so we can miss the reason for an |
5672 | * egress error if more than one packet fails the same integrity check |
5673 | * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO. |
5674 | */ |
5675 | static void handle_send_egress_err_info(struct hfi1_devdata *dd, |
5676 | int vl) |
5677 | { |
5678 | struct hfi1_pportdata *ppd = dd->pport; |
5679 | u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ |
5680 | u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO); |
5681 | char buf[96]; |
5682 | |
5683 | /* clear down all observed info as quickly as possible after read */ |
write_csr(dd, SEND_EGRESS_ERR_INFO, info);

dd_dev_info(dd,
"Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
info, egress_err_info_string(buf, sizeof(buf), info), src);
5689 | |
5690 | /* Eventually add other counters for each bit */ |
5691 | if (info & PORT_DISCARD_EGRESS_ERRS) { |
5692 | int weight, i; |
5693 | |
5694 | /* |
5695 | * Count all applicable bits as individual errors and |
5696 | * attribute them to the packet that triggered this handler. |
5697 | * This may not be completely accurate due to limitations |
5698 | * on the available hardware error information. There is |
5699 | * a single information register and any number of error |
5700 | * packets may have occurred and contributed to it before |
5701 | * this routine is called. This means that: |
5702 | * a) If multiple packets with the same error occur before |
5703 | * this routine is called, earlier packets are missed. |
5704 | * There is only a single bit for each error type. |
5705 | * b) Errors may not be attributed to the correct VL. |
5706 | * The driver is attributing all bits in the info register |
5707 | * to the packet that triggered this call, but bits |
5708 | * could be an accumulation of different packets with |
5709 | * different VLs. |
5710 | * c) A single error packet may have multiple counts attached |
5711 | * to it. There is no way for the driver to know if |
5712 | * multiple bits set in the info register are due to a |
5713 | * single packet or multiple packets. The driver assumes |
5714 | * multiple packets. |
5715 | */ |
5716 | weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS); |
5717 | for (i = 0; i < weight; i++) { |
5718 | __count_port_discards(ppd); |
5719 | if (vl >= 0 && vl < TXE_NUM_DATA_VL) |
incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
else if (vl == 15)
incr_cntr64(&ppd->port_xmit_discards_vl[C_VL_15]);
5724 | } |
5725 | } |
5726 | } |
5727 | |
5728 | /* |
5729 | * Input value is a bit position within the SEND_EGRESS_ERR_STATUS |
5730 | * register. Does it represent a 'port inactive' error? |
5731 | */ |
5732 | static inline int port_inactive_err(u64 posn) |
5733 | { |
5734 | return (posn >= SEES(TX_LINKDOWN) && |
5735 | posn <= SEES(TX_INCORRECT_LINK_STATE)); |
5736 | } |
5737 | |
5738 | /* |
5739 | * Input value is a bit position within the SEND_EGRESS_ERR_STATUS |
5740 | * register. Does it represent a 'disallowed packet' error? |
5741 | */ |
5742 | static inline int disallowed_pkt_err(int posn) |
5743 | { |
5744 | return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) && |
5745 | posn <= SEES(TX_SDMA15_DISALLOWED_PACKET)); |
5746 | } |
5747 | |
5748 | /* |
5749 | * Input value is a bit position of one of the SDMA engine disallowed |
5750 | * packet errors. Return which engine. Use of this must be guarded by |
5751 | * disallowed_pkt_err(). |
5752 | */ |
5753 | static inline int disallowed_pkt_engine(int posn) |
5754 | { |
5755 | return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); |
5756 | } |
5757 | |
5758 | /* |
5759 | * Translate an SDMA engine to a VL. Return -1 if the tranlation cannot |
5760 | * be done. |
5761 | */ |
5762 | static int engine_to_vl(struct hfi1_devdata *dd, int engine) |
5763 | { |
5764 | struct sdma_vl_map *m; |
5765 | int vl; |
5766 | |
5767 | /* range check */ |
5768 | if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES) |
5769 | return -1; |
5770 | |
5771 | rcu_read_lock(); |
5772 | m = rcu_dereference(dd->sdma_map); |
5773 | vl = m->engine_to_vl[engine]; |
5774 | rcu_read_unlock(); |
5775 | |
5776 | return vl; |
5777 | } |
5778 | |
5779 | /* |
5780 | * Translate the send context (sofware index) into a VL. Return -1 if the |
5781 | * translation cannot be done. |
5782 | */ |
5783 | static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) |
5784 | { |
5785 | struct send_context_info *sci; |
5786 | struct send_context *sc; |
5787 | int i; |
5788 | |
5789 | sci = &dd->send_contexts[sw_index]; |
5790 | |
5791 | /* there is no information for user (PSM) and ack contexts */ |
5792 | if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) |
5793 | return -1; |
5794 | |
5795 | sc = sci->sc; |
5796 | if (!sc) |
5797 | return -1; |
5798 | if (dd->vld[15].sc == sc) |
5799 | return 15; |
5800 | for (i = 0; i < num_vls; i++) |
5801 | if (dd->vld[i].sc == sc) |
5802 | return i; |
5803 | |
5804 | return -1; |
5805 | } |
5806 | |
5807 | static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5808 | { |
5809 | u64 reg_copy = reg, handled = 0; |
5810 | char buf[96]; |
5811 | int i = 0; |
5812 | |
if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
else if (is_ax(dd) &&
(reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
(dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
start_freeze_handling(dd->pport, 0);
5819 | |
5820 | while (reg_copy) { |
int posn = fls64(reg_copy);
5822 | /* fls64() returns a 1-based offset, we want it zero based */ |
5823 | int shift = posn - 1; |
5824 | u64 mask = 1ULL << shift; |
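/*
* Example (illustrative): reg_copy = 0x28 -> fls64() returns 6,
* so shift = 5 and mask = 0x20; bit 5 is handled and cleared,
* then bit 3 on the next pass.
*/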
5825 | |
if (port_inactive_err(shift)) {
count_port_inactive(dd);
handled |= mask;
} else if (disallowed_pkt_err(shift)) {
int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5831 | |
5832 | handle_send_egress_err_info(dd, vl); |
5833 | handled |= mask; |
5834 | } |
5835 | reg_copy &= ~mask; |
5836 | } |
5837 | |
5838 | reg &= ~handled; |
5839 | |
5840 | if (reg) |
dd_dev_info(dd, "Egress Error: %s\n",
egress_err_status_string(buf, sizeof(buf), reg));

for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5847 | } |
5848 | } |
5849 | |
5850 | static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
5851 | { |
5852 | char buf[96]; |
5853 | int i = 0; |
5854 | |
dd_dev_info(dd, "Send Error: %s\n",
send_err_status_string(buf, sizeof(buf), reg));

for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
incr_cntr64(&dd->send_err_status_cnt[i]);
5861 | } |
5862 | } |
5863 | |
5864 | /* |
5865 | * The maximum number of times the error clear down will loop before |
5866 | * blocking a repeating error. This value is arbitrary. |
5867 | */ |
5868 | #define MAX_CLEAR_COUNT 20 |
5869 | |
5870 | /* |
5871 | * Clear and handle an error register. All error interrupts are funneled |
5872 | * through here to have a central location to correctly handle single- |
5873 | * or multi-shot errors. |
5874 | * |
5875 | * For non per-context registers, call this routine with a context value |
5876 | * of 0 so the per-context offset is zero. |
5877 | * |
5878 | * If the handler loops too many times, assume that something is wrong |
5879 | * and can't be fixed, so mask the error bits. |
5880 | */ |
5881 | static void interrupt_clear_down(struct hfi1_devdata *dd, |
5882 | u32 context, |
5883 | const struct err_reg_info *eri) |
5884 | { |
5885 | u64 reg; |
5886 | u32 count; |
5887 | |
5888 | /* read in a loop until no more errors are seen */ |
5889 | count = 0; |
5890 | while (1) { |
reg = read_kctxt_csr(dd, context, eri->status);
if (reg == 0)
break;
write_kctxt_csr(dd, context, eri->clear, reg);
5895 | if (likely(eri->handler)) |
5896 | eri->handler(dd, context, reg); |
5897 | count++; |
5898 | if (count > MAX_CLEAR_COUNT) { |
5899 | u64 mask; |
5900 | |
dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
eri->desc, reg);
/*
* Read-modify-write so any other masked bits
* remain masked.
*/
mask = read_kctxt_csr(dd, context, eri->mask);
mask &= ~reg;
write_kctxt_csr(dd, context, eri->mask, mask);
5910 | break; |
5911 | } |
5912 | } |
5913 | } |
5914 | |
5915 | /* |
5916 | * CCE block "misc" interrupt. Source is < 16. |
5917 | */ |
5918 | static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) |
5919 | { |
5920 | const struct err_reg_info *eri = &misc_errs[source]; |
5921 | |
5922 | if (eri->handler) { |
interrupt_clear_down(dd, 0, eri);
5924 | } else { |
dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
source);
5927 | } |
5928 | } |
5929 | |
5930 | static char *send_context_err_status_string(char *buf, int buf_len, u64 flags) |
5931 | { |
return flag_string(buf, buf_len, flags,
sc_err_status_flags,
ARRAY_SIZE(sc_err_status_flags));
5935 | } |
5936 | |
5937 | /* |
5938 | * Send context error interrupt. Source (hw_context) is < 160. |
5939 | * |
5940 | * All send context errors cause the send context to halt. The normal |
5941 | * clear-down mechanism cannot be used because we cannot clear the |
5942 | * error bits until several other long-running items are done first. |
5943 | * This is OK because with the context halted, nothing else is going |
5944 | * to happen on it anyway. |
5945 | */ |
5946 | static void is_sendctxt_err_int(struct hfi1_devdata *dd, |
5947 | unsigned int hw_context) |
5948 | { |
5949 | struct send_context_info *sci; |
5950 | struct send_context *sc; |
5951 | char flags[96]; |
5952 | u64 status; |
5953 | u32 sw_index; |
5954 | int i = 0; |
5955 | unsigned long irq_flags; |
5956 | |
5957 | sw_index = dd->hw_to_sw[hw_context]; |
5958 | if (sw_index >= dd->num_send_contexts) { |
dd_dev_err(dd,
"out of range sw index %u for send context %u\n",
sw_index, hw_context);
5962 | return; |
5963 | } |
5964 | sci = &dd->send_contexts[sw_index]; |
5965 | spin_lock_irqsave(&dd->sc_lock, irq_flags); |
5966 | sc = sci->sc; |
5967 | if (!sc) { |
dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
sw_index, hw_context);
spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5971 | return; |
5972 | } |
5973 | |
5974 | /* tell the software that a halt has begun */ |
5975 | sc_stop(sc, SCF_HALTED); |
5976 | |
status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
send_context_err_status_string(flags, sizeof(flags),
status));

if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5985 | |
5986 | /* |
5987 | * Automatically restart halted kernel contexts out of interrupt |
5988 | * context. User contexts must ask the driver to restart the context. |
5989 | */ |
5990 | if (sc->type != SC_USER) |
queue_work(dd->pport->hfi1_wq, &sc->halt_work);
spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5993 | |
5994 | /* |
5995 | * Update the counters for the corresponding status bits. |
5996 | * Note that these particular counters are aggregated over all |
5997 | * 160 contexts. |
5998 | */ |
5999 | for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) { |
6000 | if (status & (1ull << i)) |
incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6002 | } |
6003 | } |
6004 | |
6005 | static void handle_sdma_eng_err(struct hfi1_devdata *dd, |
6006 | unsigned int source, u64 status) |
6007 | { |
6008 | struct sdma_engine *sde; |
6009 | int i = 0; |
6010 | |
6011 | sde = &dd->per_sdma[source]; |
6012 | #ifdef CONFIG_SDMA_VERBOSITY |
dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
slashstrip(__FILE__), __LINE__, __func__);
dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
sde->this_idx, source, (unsigned long long)status);
6017 | #endif |
6018 | sde->err_cnt++; |
6019 | sdma_engine_error(sde, status); |
6020 | |
6021 | /* |
6022 | * Update the counters for the corresponding status bits. |
6023 | * Note that these particular counters are aggregated over |
6024 | * all 16 DMA engines. |
6025 | */ |
6026 | for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) { |
6027 | if (status & (1ull << i)) |
incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6029 | } |
6030 | } |
6031 | |
6032 | /* |
6033 | * CCE block SDMA error interrupt. Source is < 16. |
6034 | */ |
6035 | static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) |
6036 | { |
6037 | #ifdef CONFIG_SDMA_VERBOSITY |
6038 | struct sdma_engine *sde = &dd->per_sdma[source]; |
6039 | |
dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
slashstrip(__FILE__), __LINE__, __func__);
dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
source);
sdma_dumpstate(sde);
#endif
interrupt_clear_down(dd, source, &sdma_eng_err);
6047 | } |
6048 | |
6049 | /* |
6050 | * CCE block "various" interrupt. Source is < 8. |
6051 | */ |
6052 | static void is_various_int(struct hfi1_devdata *dd, unsigned int source) |
6053 | { |
6054 | const struct err_reg_info *eri = &various_err[source]; |
6055 | |
6056 | /* |
6057 | * TCritInt cannot go through interrupt_clear_down() |
6058 | * because it is not a second tier interrupt. The handler |
6059 | * should be called directly. |
6060 | */ |
6061 | if (source == TCRIT_INT_SOURCE) |
6062 | handle_temp_err(dd); |
6063 | else if (eri->handler) |
interrupt_clear_down(dd, 0, eri);
else
dd_dev_info(dd,
"%s: Unimplemented/reserved interrupt %d\n",
__func__, source);
6069 | } |
6070 | |
6071 | static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) |
6072 | { |
6073 | /* src_ctx is always zero */ |
6074 | struct hfi1_pportdata *ppd = dd->pport; |
6075 | unsigned long flags; |
6076 | u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); |
6077 | |
6078 | if (reg & QSFP_HFI0_MODPRST_N) { |
6079 | if (!qsfp_mod_present(ppd)) { |
dd_dev_info(dd, "%s: QSFP module removed\n",
__func__);
6082 | |
6083 | ppd->driver_link_ready = 0; |
6084 | /* |
6085 | * Cable removed, reset all our information about the |
6086 | * cache and cable capabilities |
6087 | */ |
6088 | |
6089 | spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); |
6090 | /* |
6091 | * We don't set cache_refresh_required here as we expect |
6092 | * an interrupt when a cable is inserted |
6093 | */ |
6094 | ppd->qsfp_info.cache_valid = 0; |
6095 | ppd->qsfp_info.reset_needed = 0; |
6096 | ppd->qsfp_info.limiting_active = 0; |
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
flags);
/* Invert the ModPresent pin now to detect plug-in */
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6102 | |
6103 | if ((ppd->offline_disabled_reason > |
6104 | HFI1_ODR_MASK( |
6105 | OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) || |
6106 | (ppd->offline_disabled_reason == |
6107 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) |
6108 | ppd->offline_disabled_reason = |
6109 | HFI1_ODR_MASK( |
6110 | OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); |
6111 | |
6112 | if (ppd->host_link_state == HLS_DN_POLL) { |
6113 | /* |
6114 | * The link is still in POLL. This means |
6115 | * that the normal link down processing |
6116 | * will not happen. We have to do it here |
6117 | * before turning the DC off. |
6118 | */ |
queue_work(ppd->link_wq, &ppd->link_down_work);
6120 | } |
6121 | } else { |
dd_dev_info(dd, "%s: QSFP module inserted\n",
__func__);
6124 | |
6125 | spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); |
6126 | ppd->qsfp_info.cache_valid = 0; |
6127 | ppd->qsfp_info.cache_refresh_required = 1; |
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
flags);
6130 | |
6131 | /* |
6132 | * Stop inversion of ModPresent pin to detect |
6133 | * removal of the cable |
6134 | */ |
6135 | qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N; |
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6138 | |
6139 | ppd->offline_disabled_reason = |
6140 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); |
6141 | } |
6142 | } |
6143 | |
6144 | if (reg & QSFP_HFI0_INT_N) { |
dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6146 | __func__); |
6147 | spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); |
6148 | ppd->qsfp_info.check_interrupt_flags = 1; |
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6150 | } |
6151 | |
6152 | /* Schedule the QSFP work only if there is a cable attached. */ |
6153 | if (qsfp_mod_present(ppd)) |
queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6155 | } |
6156 | |
6157 | static int request_host_lcb_access(struct hfi1_devdata *dd) |
6158 | { |
6159 | int ret; |
6160 | |
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
6167 | } |
6168 | return ret == HCMD_SUCCESS ? 0 : -EBUSY; |
6169 | } |
6170 | |
6171 | static int request_8051_lcb_access(struct hfi1_devdata *dd) |
6172 | { |
6173 | int ret; |
6174 | |
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_GRANT_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
6181 | } |
6182 | return ret == HCMD_SUCCESS ? 0 : -EBUSY; |
6183 | } |
6184 | |
6185 | /* |
6186 | * Set the LCB selector - allow host access. The DCC selector always |
6187 | * points to the host. |
6188 | */ |
6189 | static inline void set_host_lcb_access(struct hfi1_devdata *dd) |
6190 | { |
6191 | write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, |
6192 | DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK | |
6193 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); |
6194 | } |
6195 | |
6196 | /* |
6197 | * Clear the LCB selector - allow 8051 access. The DCC selector always |
6198 | * points to the host. |
6199 | */ |
6200 | static inline void set_8051_lcb_access(struct hfi1_devdata *dd) |
6201 | { |
6202 | write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, |
6203 | DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); |
6204 | } |
6205 | |
6206 | /* |
6207 | * Acquire LCB access from the 8051. If the host already has access, |
6208 | * just increment a counter. Otherwise, inform the 8051 that the |
6209 | * host is taking access. |
6210 | * |
6211 | * Returns: |
6212 | * 0 on success |
6213 | * -EBUSY if the 8051 has control and cannot be disturbed |
6214 | * -errno if unable to acquire access from the 8051 |
6215 | */ |
6216 | int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) |
6217 | { |
6218 | struct hfi1_pportdata *ppd = dd->pport; |
6219 | int ret = 0; |
6220 | |
6221 | /* |
6222 | * Use the host link state lock so the operation of this routine |
6223 | * { link state check, selector change, count increment } can occur |
6224 | * as a unit against a link state change. Otherwise there is a |
6225 | * race between the state change and the count increment. |
6226 | */ |
6227 | if (sleep_ok) { |
6228 | mutex_lock(&ppd->hls_lock); |
6229 | } else { |
while (!mutex_trylock(&ppd->hls_lock))
6231 | udelay(1); |
6232 | } |
6233 | |
6234 | /* this access is valid only when the link is up */ |
6235 | if (ppd->host_link_state & HLS_DOWN) { |
dd_dev_info(dd, "%s: link state %s not up\n",
6237 | __func__, link_state_name(ppd->host_link_state)); |
6238 | ret = -EBUSY; |
6239 | goto done; |
6240 | } |
6241 | |
6242 | if (dd->lcb_access_count == 0) { |
6243 | ret = request_host_lcb_access(dd); |
6244 | if (ret) { |
6245 | if (!(dd->flags & HFI1_SHUTDOWN)) |
dd_dev_err(dd,
"%s: unable to acquire LCB access, err %d\n",
__func__, ret);
6249 | goto done; |
6250 | } |
6251 | set_host_lcb_access(dd); |
6252 | } |
6253 | dd->lcb_access_count++; |
6254 | done: |
mutex_unlock(&ppd->hls_lock);
6256 | return ret; |
6257 | } |
6258 | |
6259 | /* |
6260 | * Release LCB access by decrementing the use count. If the count is moving |
6261 | * from 1 to 0, inform 8051 that it has control back. |
6262 | * |
6263 | * Returns: |
6264 | * 0 on success |
6265 | * -errno if unable to release access to the 8051 |
6266 | */ |
6267 | int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) |
6268 | { |
6269 | int ret = 0; |
6270 | |
6271 | /* |
6272 | * Use the host link state lock because the acquire needed it. |
6273 | * Here, we only need to keep { selector change, count decrement } |
6274 | * as a unit. |
6275 | */ |
6276 | if (sleep_ok) { |
6277 | mutex_lock(&dd->pport->hls_lock); |
6278 | } else { |
while (!mutex_trylock(&dd->pport->hls_lock))
6280 | udelay(1); |
6281 | } |
6282 | |
6283 | if (dd->lcb_access_count == 0) { |
dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
__func__);
6286 | goto done; |
6287 | } |
6288 | |
6289 | if (dd->lcb_access_count == 1) { |
6290 | set_8051_lcb_access(dd); |
6291 | ret = request_8051_lcb_access(dd); |
6292 | if (ret) { |
dd_dev_err(dd,
"%s: unable to release LCB access, err %d\n",
__func__, ret);
6296 | /* restore host access if the grant didn't work */ |
6297 | set_host_lcb_access(dd); |
6298 | goto done; |
6299 | } |
6300 | } |
6301 | dd->lcb_access_count--; |
6302 | done: |
mutex_unlock(&dd->pport->hls_lock);
6304 | return ret; |
6305 | } |
6306 | |
6307 | /* |
6308 | * Initialize LCB access variables and state. Called during driver load, |
6309 | * after most of the initialization is finished. |
6310 | * |
6311 | * The DC default is LCB access on for the host. The driver defaults to |
6312 | * leaving access to the 8051. Assign access now - this constrains the call |
6313 | * to this routine to be after all LCB set-up is done. In particular, after |
* hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6315 | */ |
6316 | static void init_lcb_access(struct hfi1_devdata *dd) |
6317 | { |
6318 | dd->lcb_access_count = 0; |
6319 | } |
6320 | |
6321 | /* |
6322 | * Write a response back to a 8051 request. |
6323 | */ |
6324 | static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) |
6325 | { |
6326 | write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, |
6327 | DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK | |
6328 | (u64)return_code << |
6329 | DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT | |
6330 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); |
6331 | } |
6332 | |
6333 | /* |
6334 | * Handle host requests from the 8051. |
6335 | */ |
6336 | static void handle_8051_request(struct hfi1_pportdata *ppd) |
6337 | { |
6338 | struct hfi1_devdata *dd = ppd->dd; |
6339 | u64 reg; |
6340 | u16 data = 0; |
6341 | u8 type; |
6342 | |
6343 | reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); |
6344 | if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0) |
6345 | return; /* no request */ |
6346 | |
6347 | /* zero out COMPLETED so the response is seen */ |
write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6349 | |
6350 | /* extract request details */ |
6351 | type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT) |
6352 | & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK; |
6353 | data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT) |
6354 | & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK; |
6355 | |
6356 | switch (type) { |
6357 | case HREQ_LOAD_CONFIG: |
6358 | case HREQ_SAVE_CONFIG: |
6359 | case HREQ_READ_CONFIG: |
6360 | case HREQ_SET_TX_EQ_ABS: |
6361 | case HREQ_SET_TX_EQ_REL: |
6362 | case HREQ_ENABLE: |
dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6366 | break; |
6367 | case HREQ_LCB_RESET: |
6368 | /* Put the LCB, RX FPE and TX FPE into reset */ |
6369 | write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); |
6370 | /* Make sure the write completed */ |
6371 | (void)read_csr(dd, DCC_CFG_RESET); |
6372 | /* Hold the reset long enough to take effect */ |
6373 | udelay(1); |
6374 | /* Take the LCB, RX FPE and TX FPE out of reset */ |
6375 | write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); |
hreq_response(dd, HREQ_SUCCESS, 0);
6377 | |
6378 | break; |
6379 | case HREQ_CONFIG_DONE: |
hreq_response(dd, HREQ_SUCCESS, 0);
6381 | break; |
6382 | |
6383 | case HREQ_INTERFACE_TEST: |
hreq_response(dd, HREQ_SUCCESS, data);
6385 | break; |
6386 | default: |
dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6389 | break; |
6390 | } |
6391 | } |
6392 | |
6393 | /* |
6394 | * Set up allocation unit vaulue. |
6395 | */ |
6396 | void set_up_vau(struct hfi1_devdata *dd, u8 vau) |
6397 | { |
6398 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
6399 | |
6400 | /* do not modify other values in the register */ |
6401 | reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; |
6402 | reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; |
write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6404 | } |
6405 | |
6406 | /* |
6407 | * Set up initial VL15 credits of the remote. Assumes the rest of |
6408 | * the CM credit registers are zero from a previous global or credit reset. |
6409 | * Shared limit for VL15 will always be 0. |
6410 | */ |
6411 | void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) |
6412 | { |
6413 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
6414 | |
6415 | /* set initial values for total and shared credit limit */ |
6416 | reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | |
6417 | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); |
6418 | |
6419 | /* |
6420 | * Set total limit to be equal to VL15 credits. |
6421 | * Leave shared limit at 0. |
6422 | */ |
6423 | reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; |
write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);

write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6428 | } |
6429 | |
6430 | /* |
6431 | * Zero all credit details from the previous connection and |
6432 | * reset the CM manager's internal counters. |
6433 | */ |
6434 | void reset_link_credits(struct hfi1_devdata *dd) |
6435 | { |
6436 | int i; |
6437 | |
6438 | /* remove all previous VL credit limits */ |
6439 | for (i = 0; i < TXE_NUM_DATA_VL; i++) |
write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
write_csr(dd, SEND_CM_CREDIT_VL15, 0);
write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6443 | /* reset the CM block */ |
6444 | pio_send_control(dd, PSC_CM_RESET); |
6445 | /* reset cached value */ |
6446 | dd->vl15buf_cached = 0; |
6447 | } |
6448 | |
6449 | /* convert a vCU to a CU */ |
6450 | static u32 vcu_to_cu(u8 vcu) |
6451 | { |
6452 | return 1 << vcu; |
6453 | } |
6454 | |
6455 | /* convert a CU to a vCU */ |
6456 | static u8 cu_to_vcu(u32 cu) |
6457 | { |
6458 | return ilog2(cu); |
6459 | } |
6460 | |
6461 | /* convert a vAU to an AU */ |
6462 | static u32 vau_to_au(u8 vau) |
6463 | { |
6464 | return 8 * (1 << vau); |
6465 | } |
6466 | |
6467 | static void set_linkup_defaults(struct hfi1_pportdata *ppd) |
6468 | { |
6469 | ppd->sm_trap_qp = 0x0; |
6470 | ppd->sa_qp = 0x1; |
6471 | } |
6472 | |
6473 | /* |
6474 | * Graceful LCB shutdown. This leaves the LCB FIFOs in reset. |
6475 | */ |
6476 | static void lcb_shutdown(struct hfi1_devdata *dd, int abort) |
6477 | { |
6478 | u64 reg; |
6479 | |
6480 | /* clear lcb run: LCB_CFG_RUN.EN = 0 */ |
write_csr(dd, DC_LCB_CFG_RUN, 0);
/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
reg = read_csr(dd, DCC_CFG_RESET);
write_csr(dd, DCC_CFG_RESET, reg |
DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */
write_csr(dd, DCC_CFG_RESET, reg);
write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6495 | } |
6496 | } |
6497 | |
6498 | /* |
6499 | * This routine should be called after the link has been transitioned to |
6500 | * OFFLINE (OFFLINE state has the side effect of putting the SerDes into |
6501 | * reset). |
6502 | * |
6503 | * The expectation is that the caller of this routine would have taken |
6504 | * care of properly transitioning the link into the correct state. |
6505 | * NOTE: the caller needs to acquire the dd->dc8051_lock lock |
6506 | * before calling this function. |
6507 | */ |
6508 | static void _dc_shutdown(struct hfi1_devdata *dd) |
6509 | { |
6510 | lockdep_assert_held(&dd->dc8051_lock); |
6511 | |
6512 | if (dd->dc_shutdown) |
6513 | return; |
6514 | |
6515 | dd->dc_shutdown = 1; |
/* Shutdown the LCB */
lcb_shutdown(dd, 1);
/*
* Going to OFFLINE would have caused the 8051 to put the
* SerDes into reset already. Just need to shut down the 8051
* itself.
*/
write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6524 | } |
6525 | |
6526 | static void dc_shutdown(struct hfi1_devdata *dd) |
6527 | { |
6528 | mutex_lock(&dd->dc8051_lock); |
6529 | _dc_shutdown(dd); |
mutex_unlock(&dd->dc8051_lock);
6531 | } |
6532 | |
6533 | /* |
6534 | * Calling this after the DC has been brought out of reset should not |
6535 | * do any damage. |
6536 | * NOTE: the caller needs to acquire the dd->dc8051_lock lock |
6537 | * before calling this function. |
6538 | */ |
6539 | static void _dc_start(struct hfi1_devdata *dd) |
6540 | { |
6541 | lockdep_assert_held(&dd->dc8051_lock); |
6542 | |
6543 | if (!dd->dc_shutdown) |
6544 | return; |
6545 | |
6546 | /* Take the 8051 out of reset */ |
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
/* Wait until 8051 is ready */
if (wait_fm_ready(dd, TIMEOUT_8051_START))
dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
__func__);

/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
/* lcb_shutdown() with abort=1 does not restore these */
write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6557 | dd->dc_shutdown = 0; |
6558 | } |
6559 | |
6560 | static void dc_start(struct hfi1_devdata *dd) |
6561 | { |
6562 | mutex_lock(&dd->dc8051_lock); |
6563 | _dc_start(dd); |
mutex_unlock(&dd->dc8051_lock);
6565 | } |
6566 | |
6567 | /* |
6568 | * These LCB adjustments are for the Aurora SerDes core in the FPGA. |
6569 | */ |
6570 | static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) |
6571 | { |
6572 | u64 rx_radr, tx_radr; |
6573 | u32 version; |
6574 | |
6575 | if (dd->icode != ICODE_FPGA_EMULATION) |
6576 | return; |
6577 | |
6578 | /* |
6579 | * These LCB defaults on emulator _s are good, nothing to do here: |
6580 | * LCB_CFG_TX_FIFOS_RADR |
6581 | * LCB_CFG_RX_FIFOS_RADR |
6582 | * LCB_CFG_LN_DCLK |
6583 | * LCB_CFG_IGNORE_LOST_RCLK |
6584 | */ |
6585 | if (is_emulator_s(dd)) |
6586 | return; |
6587 | /* else this is _p */ |
6588 | |
6589 | version = emulator_rev(dd); |
6590 | if (!is_ax(dd)) |
6591 | version = 0x2d; /* all B0 use 0x2d or higher settings */ |
6592 | |
6593 | if (version <= 0x12) { |
6594 | /* release 0x12 and below */ |
6595 | |
6596 | /* |
6597 | * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9 |
6598 | * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9 |
6599 | * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa |
6600 | */ |
6601 | rx_radr = |
6602 | 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
6603 | | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
6604 | | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; |
6605 | /* |
6606 | * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default) |
6607 | * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6 |
6608 | */ |
6609 | tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; |
6610 | } else if (version <= 0x18) { |
6611 | /* release 0x13 up to 0x18 */ |
6612 | /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ |
6613 | rx_radr = |
6614 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
6615 | | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
6616 | | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; |
6617 | tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; |
6618 | } else if (version == 0x19) { |
6619 | /* release 0x19 */ |
6620 | /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */ |
6621 | rx_radr = |
6622 | 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
6623 | | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
6624 | | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; |
6625 | tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; |
6626 | } else if (version == 0x1a) { |
6627 | /* release 0x1a */ |
6628 | /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ |
6629 | rx_radr = |
6630 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
6631 | | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
6632 | | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; |
6633 | tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; |
write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6635 | } else { |
6636 | /* release 0x1b and higher */ |
6637 | /* LCB_CFG_RX_FIFOS_RADR = 0x877 */ |
6638 | rx_radr = |
6639 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT |
6640 | | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT |
6641 | | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; |
6642 | tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; |
6643 | } |
6644 | |
write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6650 | } |
6651 | |
6652 | /* |
6653 | * Handle a SMA idle message |
6654 | * |
6655 | * This is a work-queue function outside of the interrupt. |
6656 | */ |
6657 | void handle_sma_message(struct work_struct *work) |
6658 | { |
6659 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
6660 | sma_message_work); |
6661 | struct hfi1_devdata *dd = ppd->dd; |
6662 | u64 msg; |
6663 | int ret; |
6664 | |
6665 | /* |
6666 | * msg is bytes 1-4 of the 40-bit idle message - the command code |
6667 | * is stripped off |
6668 | */ |
ret = read_idle_sma(dd, &msg);
6670 | if (ret) |
6671 | return; |
dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6673 | /* |
6674 | * React to the SMA message. Byte[1] (0 for us) is the command. |
6675 | */ |
6676 | switch (msg & 0xff) { |
6677 | case SMA_IDLE_ARM: |
6678 | /* |
6679 | * See OPAv1 table 9-14 - HFI and External Switch Ports Key |
6680 | * State Transitions |
6681 | * |
6682 | * Only expected in INIT or ARMED, discard otherwise. |
6683 | */ |
6684 | if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) |
6685 | ppd->neighbor_normal = 1; |
6686 | break; |
6687 | case SMA_IDLE_ACTIVE: |
6688 | /* |
6689 | * See OPAv1 table 9-14 - HFI and External Switch Ports Key |
6690 | * State Transitions |
6691 | * |
6692 | * Can activate the node. Discard otherwise. |
6693 | */ |
6694 | if (ppd->host_link_state == HLS_UP_ARMED && |
6695 | ppd->is_active_optimize_enabled) { |
6696 | ppd->neighbor_normal = 1; |
6697 | ret = set_link_state(ppd, HLS_UP_ACTIVE); |
6698 | if (ret) |
dd_dev_err(
dd,
"%s: received Active SMA idle message, couldn't set link to Active\n",
__func__);
6703 | } |
6704 | break; |
6705 | default: |
dd_dev_err(dd,
"%s: received unexpected SMA idle message 0x%llx\n",
__func__, msg);
6709 | break; |
6710 | } |
6711 | } |
6712 | |
6713 | static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) |
6714 | { |
6715 | u64 rcvctrl; |
6716 | unsigned long flags; |
6717 | |
6718 | spin_lock_irqsave(&dd->rcvctrl_lock, flags); |
6719 | rcvctrl = read_csr(dd, RCV_CTRL); |
6720 | rcvctrl |= add; |
6721 | rcvctrl &= ~clear; |
write_csr(dd, RCV_CTRL, rcvctrl);
spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6724 | } |
6725 | |
6726 | static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) |
6727 | { |
adjust_rcvctrl(dd, add, 0);
6729 | } |
6730 | |
6731 | static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) |
6732 | { |
adjust_rcvctrl(dd, 0, clear);
6734 | } |
6735 | |
6736 | /* |
6737 | * Called from all interrupt handlers to start handling an SPC freeze. |
6738 | */ |
6739 | void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) |
6740 | { |
6741 | struct hfi1_devdata *dd = ppd->dd; |
6742 | struct send_context *sc; |
6743 | int i; |
6744 | int sc_flags; |
6745 | |
6746 | if (flags & FREEZE_SELF) |
6747 | write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); |
6748 | |
6749 | /* enter frozen mode */ |
6750 | dd->flags |= HFI1_FROZEN; |
6751 | |
6752 | /* notify all SDMA engines that they are going into a freeze */ |
sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6754 | |
6755 | sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? |
6756 | SCF_LINK_DOWN : 0); |
6757 | /* do halt pre-handling on all enabled send contexts */ |
6758 | for (i = 0; i < dd->num_send_contexts; i++) { |
6759 | sc = dd->send_contexts[i].sc; |
6760 | if (sc && (sc->flags & SCF_ENABLED)) |
sc_stop(sc, sc_flags);
6762 | } |
6763 | |
/* Send contexts are frozen. Notify user space */
6765 | hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT); |
6766 | |
6767 | if (flags & FREEZE_ABORT) { |
dd_dev_err(dd,
"Aborted freeze recovery. Please REBOOT system\n");
6770 | return; |
6771 | } |
6772 | /* queue non-interrupt handler */ |
queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6774 | } |
6775 | |
6776 | /* |
6777 | * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen, |
6778 | * depending on the "freeze" parameter. |
6779 | * |
6780 | * No need to return an error if it times out, our only option |
6781 | * is to proceed anyway. |
6782 | */ |
6783 | static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) |
6784 | { |
6785 | unsigned long timeout; |
6786 | u64 reg; |
6787 | |
6788 | timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT); |
6789 | while (1) { |
6790 | reg = read_csr(dd, CCE_STATUS); |
6791 | if (freeze) { |
6792 | /* waiting until all indicators are set */ |
6793 | if ((reg & ALL_FROZE) == ALL_FROZE) |
6794 | return; /* all done */ |
6795 | } else { |
6796 | /* waiting until all indicators are clear */ |
6797 | if ((reg & ALL_FROZE) == 0) |
6798 | return; /* all done */ |
6799 | } |
6800 | |
6801 | if (time_after(jiffies, timeout)) { |
dd_dev_err(dd,
"Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
freeze ? "" : "un", reg & ALL_FROZE,
6805 | freeze ? ALL_FROZE : 0ull); |
6806 | return; |
6807 | } |
usleep_range(80, 120);
6809 | } |
6810 | } |
6811 | |
6812 | /* |
6813 | * Do all freeze handling for the RXE block. |
6814 | */ |
6815 | static void rxe_freeze(struct hfi1_devdata *dd) |
6816 | { |
6817 | int i; |
6818 | struct hfi1_ctxtdata *rcd; |
6819 | |
6820 | /* disable port */ |
6821 | clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
6822 | |
6823 | /* disable all receive contexts */ |
6824 | for (i = 0; i < dd->num_rcv_contexts; i++) { |
rcd = hfi1_rcd_get_by_index(dd, i);
6826 | hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); |
6827 | hfi1_rcd_put(rcd); |
6828 | } |
6829 | } |
6830 | |
6831 | /* |
6832 | * Unfreeze handling for the RXE block - kernel contexts only. |
6833 | * This will also enable the port. User contexts will do unfreeze |
6834 | * handling on a per-context basis as they call into the driver. |
6835 | * |
6836 | */ |
6837 | static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) |
6838 | { |
6839 | u32 rcvmask; |
6840 | u16 i; |
6841 | struct hfi1_ctxtdata *rcd; |
6842 | |
6843 | /* enable all kernel contexts */ |
6844 | for (i = 0; i < dd->num_rcv_contexts; i++) { |
rcd = hfi1_rcd_get_by_index(dd, i);
6846 | |
6847 | /* Ensure all non-user contexts(including vnic) are enabled */ |
6848 | if (!rcd || |
6849 | (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { |
6850 | hfi1_rcd_put(rcd); |
6851 | continue; |
6852 | } |
6853 | rcvmask = HFI1_RCVCTRL_CTXT_ENB; |
6854 | /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ |
6855 | rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? |
6856 | HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; |
hfi1_rcvctrl(dd, rcvmask, rcd);
6858 | hfi1_rcd_put(rcd); |
6859 | } |
6860 | |
6861 | /* enable port */ |
6862 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
6863 | } |
6864 | |
6865 | /* |
6866 | * Non-interrupt SPC freeze handling. |
6867 | * |
6868 | * This is a work-queue function outside of the triggering interrupt. |
6869 | */ |
6870 | void handle_freeze(struct work_struct *work) |
6871 | { |
6872 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
6873 | freeze_work); |
6874 | struct hfi1_devdata *dd = ppd->dd; |
6875 | |
6876 | /* wait for freeze indicators on all affected blocks */ |
wait_for_freeze_status(dd, 1);
6878 | |
6879 | /* SPC is now frozen */ |
6880 | |
6881 | /* do send PIO freeze steps */ |
6882 | pio_freeze(dd); |
6883 | |
6884 | /* do send DMA freeze steps */ |
6885 | sdma_freeze(dd); |
6886 | |
6887 | /* do send egress freeze steps - nothing to do */ |
6888 | |
6889 | /* do receive freeze steps */ |
6890 | rxe_freeze(dd); |
6891 | |
6892 | /* |
6893 | * Unfreeze the hardware - clear the freeze, wait for each |
6894 | * block's frozen bit to clear, then clear the frozen flag. |
6895 | */ |
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
wait_for_freeze_status(dd, 0);

if (is_ax(dd)) {
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
wait_for_freeze_status(dd, 1);
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
wait_for_freeze_status(dd, 0);
}
6905 | |
6906 | /* do send PIO unfreeze steps for kernel contexts */ |
6907 | pio_kernel_unfreeze(dd); |
6908 | |
6909 | /* do send DMA unfreeze steps */ |
6910 | sdma_unfreeze(dd); |
6911 | |
6912 | /* do send egress unfreeze steps - nothing to do */ |
6913 | |
6914 | /* do receive unfreeze steps for kernel contexts */ |
6915 | rxe_kernel_unfreeze(dd); |
6916 | |
6917 | /* |
6918 | * The unfreeze procedure touches global device registers when |
6919 | * it disables and re-enables RXE. Mark the device unfrozen |
6920 | * after all that is done so other parts of the driver waiting |
6921 | * for the device to unfreeze don't do things out of order. |
6922 | * |
6923 | * The above implies that the meaning of HFI1_FROZEN flag is |
6924 | * "Device has gone into freeze mode and freeze mode handling |
6925 | * is still in progress." |
6926 | * |
6927 | * The flag will be removed when freeze mode processing has |
6928 | * completed. |
6929 | */ |
6930 | dd->flags &= ~HFI1_FROZEN; |
6931 | wake_up(&dd->event_queue); |
6932 | |
6933 | /* no longer frozen */ |
6934 | } |
6935 | |
6936 | /** |
6937 | * update_xmit_counters - update PortXmitWait/PortVlXmitWait |
6938 | * counters. |
* @ppd: info of physical HFI port
6940 | * @link_width: new link width after link up or downgrade |
6941 | * |
6942 | * Update the PortXmitWait and PortVlXmitWait counters after |
6943 | * a link up or downgrade event to reflect a link width change. |
6944 | */ |
6945 | static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width) |
6946 | { |
6947 | int i; |
6948 | u16 tx_width; |
6949 | u16 link_speed; |
6950 | |
6951 | tx_width = tx_link_width(link_width); |
link_speed = get_link_speed(ppd->link_speed_active);
6953 | |
6954 | /* |
6955 | * There are C_VL_COUNT number of PortVLXmitWait counters. |
6956 | * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. |
6957 | */ |
6958 | for (i = 0; i < C_VL_COUNT + 1; i++) |
get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6960 | } |
6961 | |
6962 | /* |
6963 | * Handle a link up interrupt from the 8051. |
6964 | * |
6965 | * This is a work-queue function outside of the interrupt. |
6966 | */ |
6967 | void handle_link_up(struct work_struct *work) |
6968 | { |
6969 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
6970 | link_up_work); |
6971 | struct hfi1_devdata *dd = ppd->dd; |
6972 | |
6973 | set_link_state(ppd, HLS_UP_INIT); |
6974 | |
6975 | /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ |
6976 | read_ltp_rtt(dd); |
6977 | /* |
6978 | * OPA specifies that certain counters are cleared on a transition |
6979 | * to link up, so do that. |
6980 | */ |
6981 | clear_linkup_counters(dd); |
6982 | /* |
6983 | * And (re)set link up default values. |
6984 | */ |
6985 | set_linkup_defaults(ppd); |
6986 | |
6987 | /* |
6988 | * Set VL15 credits. Use cached value from verify cap interrupt. |
6989 | * In case of quick linkup or simulator, vl15 value will be set by |
6990 | * handle_linkup_change. VerifyCap interrupt handler will not be |
6991 | * called in those scenarios. |
6992 | */ |
6993 | if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) |
set_up_vl15(dd, dd->vl15buf_cached);
6995 | |
6996 | /* enforce link speed enabled */ |
6997 | if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { |
6998 | /* oops - current speed is not enabled, bounce */ |
dd_dev_err(dd,
"Link speed active 0x%x is outside enabled 0x%x, downing link\n",
ppd->link_speed_active, ppd->link_speed_enabled);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
OPA_LINKDOWN_REASON_SPEED_POLICY);
7004 | set_link_state(ppd, HLS_DN_OFFLINE); |
7005 | start_link(ppd); |
7006 | } |
7007 | } |
7008 | |
7009 | /* |
7010 | * Several pieces of LNI information were cached for SMA in ppd. |
7011 | * Reset these on link down |
7012 | */ |
7013 | static void reset_neighbor_info(struct hfi1_pportdata *ppd) |
7014 | { |
7015 | ppd->neighbor_guid = 0; |
7016 | ppd->neighbor_port_number = 0; |
7017 | ppd->neighbor_type = 0; |
7018 | ppd->neighbor_fm_security = 0; |
7019 | } |
7020 | |
7021 | static const char * const link_down_reason_strs[] = { |
[OPA_LINKDOWN_REASON_NONE] = "None",
[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
"Excessive buffer overrun",
[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
"Local media not installed",
[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
"End to end not installed",
[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7075 | }; |
7076 | |
7077 | /* return the neighbor link down reason string */ |
7078 | static const char *link_down_reason_str(u8 reason) |
7079 | { |
7080 | const char *str = NULL; |
7081 | |
7082 | if (reason < ARRAY_SIZE(link_down_reason_strs)) |
7083 | str = link_down_reason_strs[reason]; |
7084 | if (!str) |
str = "(invalid)";
7086 | |
7087 | return str; |
7088 | } |
7089 | |
7090 | /* |
7091 | * Handle a link down interrupt from the 8051. |
7092 | * |
7093 | * This is a work-queue function outside of the interrupt. |
7094 | */ |
7095 | void handle_link_down(struct work_struct *work) |
7096 | { |
7097 | u8 lcl_reason, neigh_reason = 0; |
7098 | u8 link_down_reason; |
7099 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
7100 | link_down_work); |
7101 | int was_up; |
static const char ldr_str[] = "Link down reason: ";
7103 | |
7104 | if ((ppd->host_link_state & |
7105 | (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) && |
7106 | ppd->port_type == PORT_TYPE_FIXED) |
7107 | ppd->offline_disabled_reason = |
7108 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED); |
7109 | |
7110 | /* Go offline first, then deal with reading/writing through 8051 */ |
7111 | was_up = !!(ppd->host_link_state & HLS_UP); |
7112 | set_link_state(ppd, HLS_DN_OFFLINE); |
7113 | xchg(&ppd->is_link_down_queued, 0); |
7114 | |
7115 | if (was_up) { |
7116 | lcl_reason = 0; |
7117 | /* link down reason is only valid if the link was up */ |
read_link_down_reason(ppd->dd, &link_down_reason);
switch (link_down_reason) {
case LDR_LINK_TRANSFER_ACTIVE_LOW:
/* the link went down, no idle message reason */
dd_dev_info(ppd->dd, "%sUnexpected link down\n",
ldr_str);
7124 | break; |
7125 | case LDR_RECEIVED_LINKDOWN_IDLE_MSG: |
7126 | /* |
7127 | * The neighbor reason is only valid if an idle message |
7128 | * was received for it. |
7129 | */ |
read_planned_down_reason_code(ppd->dd, &neigh_reason);
dd_dev_info(ppd->dd,
"%sNeighbor link down message %d, %s\n",
ldr_str, neigh_reason,
link_down_reason_str(neigh_reason));
7135 | break; |
7136 | case LDR_RECEIVED_HOST_OFFLINE_REQ: |
dd_dev_info(ppd->dd,
"%sHost requested link to go offline\n",
ldr_str);
7140 | break; |
7141 | default: |
dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
ldr_str, link_down_reason);
7144 | break; |
7145 | } |
7146 | |
7147 | /* |
7148 | * If no reason, assume peer-initiated but missed |
7149 | * LinkGoingDown idle flits. |
7150 | */ |
7151 | if (neigh_reason == 0) |
7152 | lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN; |
7153 | } else { |
7154 | /* went down while polling or going up */ |
7155 | lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT; |
7156 | } |
7157 | |
set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7159 | |
7160 | /* inform the SMA when the link transitions from up to down */ |
7161 | if (was_up && ppd->local_link_down_reason.sma == 0 && |
7162 | ppd->neigh_link_down_reason.sma == 0) { |
7163 | ppd->local_link_down_reason.sma = |
7164 | ppd->local_link_down_reason.latest; |
7165 | ppd->neigh_link_down_reason.sma = |
7166 | ppd->neigh_link_down_reason.latest; |
7167 | } |
7168 | |
7169 | reset_neighbor_info(ppd); |
7170 | |
7171 | /* disable the port */ |
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7173 | |
7174 | /* |
7175 | * If there is no cable attached, turn the DC off. Otherwise, |
7176 | * start the link bring up. |
7177 | */ |
7178 | if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) |
		dc_shutdown(ppd->dd);
7180 | else |
7181 | start_link(ppd); |
7182 | } |
7183 | |
7184 | void handle_link_bounce(struct work_struct *work) |
7185 | { |
7186 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
7187 | link_bounce_work); |
7188 | |
7189 | /* |
7190 | * Only do something if the link is currently up. |
7191 | */ |
7192 | if (ppd->host_link_state & HLS_UP) { |
7193 | set_link_state(ppd, HLS_DN_OFFLINE); |
7194 | start_link(ppd); |
7195 | } else { |
7196 | dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n" , |
7197 | __func__, link_state_name(ppd->host_link_state)); |
7198 | } |
7199 | } |
7200 | |
7201 | /* |
7202 | * Mask conversion: Capability exchange to Port LTP. The capability |
7203 | * exchange has an implicit 16b CRC that is mandatory. |
7204 | */ |
7205 | static int cap_to_port_ltp(int cap) |
7206 | { |
7207 | int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */ |
7208 | |
7209 | if (cap & CAP_CRC_14B) |
7210 | port_ltp |= PORT_LTP_CRC_MODE_14; |
7211 | if (cap & CAP_CRC_48B) |
7212 | port_ltp |= PORT_LTP_CRC_MODE_48; |
7213 | if (cap & CAP_CRC_12B_16B_PER_LANE) |
7214 | port_ltp |= PORT_LTP_CRC_MODE_PER_LANE; |
7215 | |
7216 | return port_ltp; |
7217 | } |
7218 | |
7219 | /* |
7220 | * Convert an OPA Port LTP mask to capability mask |
7221 | */ |
7222 | int port_ltp_to_cap(int port_ltp) |
7223 | { |
7224 | int cap_mask = 0; |
7225 | |
7226 | if (port_ltp & PORT_LTP_CRC_MODE_14) |
7227 | cap_mask |= CAP_CRC_14B; |
7228 | if (port_ltp & PORT_LTP_CRC_MODE_48) |
7229 | cap_mask |= CAP_CRC_48B; |
7230 | if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE) |
7231 | cap_mask |= CAP_CRC_12B_16B_PER_LANE; |
7232 | |
7233 | return cap_mask; |
7234 | } |
7235 | |
7236 | /* |
7237 | * Convert a single DC LCB CRC mode to an OPA Port LTP mask. |
7238 | */ |
7239 | static int lcb_to_port_ltp(int lcb_crc) |
7240 | { |
7241 | int port_ltp = 0; |
7242 | |
7243 | if (lcb_crc == LCB_CRC_12B_16B_PER_LANE) |
7244 | port_ltp = PORT_LTP_CRC_MODE_PER_LANE; |
7245 | else if (lcb_crc == LCB_CRC_48B) |
7246 | port_ltp = PORT_LTP_CRC_MODE_48; |
7247 | else if (lcb_crc == LCB_CRC_14B) |
7248 | port_ltp = PORT_LTP_CRC_MODE_14; |
7249 | else |
7250 | port_ltp = PORT_LTP_CRC_MODE_16; |
7251 | |
7252 | return port_ltp; |
7253 | } |
7254 | |
7255 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) |
7256 | { |
7257 | if (ppd->pkeys[2] != 0) { |
7258 | ppd->pkeys[2] = 0; |
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(ppd->dd, ppd->port);
7261 | } |
7262 | } |
7263 | |
7264 | /* |
7265 | * Convert the given link width to the OPA link width bitmask. |
7266 | */ |
7267 | static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) |
7268 | { |
7269 | switch (width) { |
7270 | case 0: |
7271 | /* |
7272 | * Simulator and quick linkup do not set the width. |
7273 | * Just set it to 4x without complaint. |
7274 | */ |
7275 | if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) |
7276 | return OPA_LINK_WIDTH_4X; |
7277 | return 0; /* no lanes up */ |
7278 | case 1: return OPA_LINK_WIDTH_1X; |
7279 | case 2: return OPA_LINK_WIDTH_2X; |
7280 | case 3: return OPA_LINK_WIDTH_3X; |
7281 | case 4: return OPA_LINK_WIDTH_4X; |
7282 | default: |
7283 | dd_dev_info(dd, "%s: invalid width %d, using 4\n" , |
7284 | __func__, width); |
7285 | return OPA_LINK_WIDTH_4X; |
7286 | } |
7287 | } |
7288 | |
7289 | /* |
7290 | * Do a population count on the bottom nibble. |
7291 | */ |
7292 | static const u8 bit_counts[16] = { |
7293 | 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 |
7294 | }; |
7295 | |
7296 | static inline u8 nibble_to_count(u8 nibble) |
7297 | { |
7298 | return bit_counts[nibble & 0xf]; |
7299 | } |
7300 | |
7301 | /* |
7302 | * Read the active lane information from the 8051 registers and return |
7303 | * their widths. |
7304 | * |
7305 | * Active lane information is found in these 8051 registers: |
7306 | * enable_lane_tx |
7307 | * enable_lane_rx |
7308 | */ |
7309 | static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, |
7310 | u16 *rx_width) |
7311 | { |
7312 | u16 tx, rx; |
7313 | u8 enable_lane_rx; |
7314 | u8 enable_lane_tx; |
7315 | u8 tx_polarity_inversion; |
7316 | u8 rx_polarity_inversion; |
7317 | u8 max_rate; |
7318 | |
7319 | /* read the active lanes */ |
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);
7327 | |
7328 | /* |
7329 | * Set link_speed_active here, overriding what was set in |
7330 | * handle_verify_cap(). The ASIC 8051 firmware does not correctly |
7331 | * set the max_rate field in handle_verify_cap until v0.19. |
7332 | */ |
7333 | if ((dd->icode == ICODE_RTL_SILICON) && |
7334 | (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { |
7335 | /* max_rate: 0 = 12.5G, 1 = 25G */ |
7336 | switch (max_rate) { |
7337 | case 0: |
7338 | dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; |
7339 | break; |
7340 | case 1: |
7341 | dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; |
7342 | break; |
7343 | default: |
7344 | dd_dev_err(dd, |
7345 | "%s: unexpected max rate %d, using 25Gb\n" , |
7346 | __func__, (int)max_rate); |
7347 | dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; |
7348 | break; |
7349 | } |
7350 | } |
7351 | |
7352 | dd_dev_info(dd, |
7353 | "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n" , |
7354 | enable_lane_tx, tx, enable_lane_rx, rx); |
7355 | *tx_width = link_width_to_bits(dd, width: tx); |
7356 | *rx_width = link_width_to_bits(dd, width: rx); |
7357 | } |
7358 | |
7359 | /* |
7360 | * Read verify_cap_local_fm_link_width[1] to obtain the link widths. |
7361 | * Valid after the end of VerifyCap and during LinkUp. Does not change |
7362 | * after link up. I.e. look elsewhere for downgrade information. |
7363 | * |
7364 | * Bits are: |
7365 | * + bits [7:4] contain the number of active transmitters |
7366 | * + bits [3:0] contain the number of active receivers |
7367 | * These are numbers 1 through 4 and can be different values if the |
7368 | * link is asymmetric. |
7369 | * |
7370 | * verify_cap_local_fm_link_width[0] retains its original value. |
7371 | */ |
7372 | static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, |
7373 | u16 *rx_width) |
7374 | { |
7375 | u16 widths, tx, rx; |
7376 | u8 misc_bits, local_flags; |
7377 | u16 active_tx, active_rx; |
7378 | |
	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
7388 | } |
7389 | |
7390 | /* |
7391 | * Set ppd->link_width_active and ppd->link_width_downgrade_active using |
7392 | * hardware information when the link first comes up. |
7393 | * |
7394 | * The link width is not available until after VerifyCap.AllFramesReceived |
7395 | * (the trigger for handle_verify_cap), so this is outside that routine |
7396 | * and should be called when the 8051 signals linkup. |
7397 | */ |
7398 | void get_linkup_link_widths(struct hfi1_pportdata *ppd) |
7399 | { |
7400 | u16 tx_width, rx_width; |
7401 | |
7402 | /* get end-of-LNI link widths */ |
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7404 | |
7405 | /* use tx_width as the link is supposed to be symmetric on link up */ |
7406 | ppd->link_width_active = tx_width; |
7407 | /* link width downgrade active (LWD.A) starts out matching LW.A */ |
7408 | ppd->link_width_downgrade_tx_active = ppd->link_width_active; |
7409 | ppd->link_width_downgrade_rx_active = ppd->link_width_active; |
7410 | /* per OPA spec, on link up LWD.E resets to LWD.S */ |
7411 | ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; |
	/* cache the active egress rate (units of 10^6 bits/sec) */
7413 | ppd->current_egress_rate = active_egress_rate(ppd); |
7414 | } |
7415 | |
7416 | /* |
7417 | * Handle a verify capabilities interrupt from the 8051. |
7418 | * |
7419 | * This is a work-queue function outside of the interrupt. |
7420 | */ |
7421 | void handle_verify_cap(struct work_struct *work) |
7422 | { |
7423 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
7424 | link_vc_work); |
7425 | struct hfi1_devdata *dd = ppd->dd; |
7426 | u64 reg; |
7427 | u8 power_management; |
7428 | u8 continuous; |
7429 | u8 vcu; |
7430 | u8 vau; |
7431 | u8 z; |
7432 | u16 vl15buf; |
7433 | u16 link_widths; |
7434 | u16 crc_mask; |
7435 | u16 crc_val; |
7436 | u16 device_id; |
7437 | u16 active_tx, active_rx; |
7438 | u8 partner_supported_crc; |
7439 | u8 remote_tx_rate; |
7440 | u8 device_rev; |
7441 | |
7442 | set_link_state(ppd, HLS_VERIFY_CAP); |
7443 | |
	lcb_shutdown(dd, 0);
7445 | adjust_lcb_for_fpga_serdes(dd); |
7446 | |
	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
	dd_dev_info(dd,
		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
		    (int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		    (u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		    (u32)device_id, (u32)device_rev);
7466 | /* |
7467 | * The peer vAU value just read is the peer receiver value. HFI does |
7468 | * not support a transmit vAU of 0 (AU == 8). We advertised that |
7469 | * with Z=1 in the fabric capabilities sent to the peer. The peer |
7470 | * will see our Z=1, and, if it advertised a vAU of 0, will move its |
7471 | * receive to vAU of 1 (AU == 16). Do the same here. We do not care |
7472 | * about the peer Z value - our sent vAU is 3 (hardwired) and is not |
7473 | * subject to the Z value exception. |
7474 | */ |
7475 | if (vau == 0) |
7476 | vau = 1; |
7477 | set_up_vau(dd, vau); |
7478 | |
7479 | /* |
	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
	 * credits value and wait for link-up interrupt to set it.
	 */
	set_up_vl15(dd, 0);
7484 | dd->vl15buf_cached = vl15buf; |
7485 | |
7486 | /* set up the LCB CRC mode */ |
7487 | crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; |
7488 | |
7489 | /* order is important: use the lowest bit in common */ |
7490 | if (crc_mask & CAP_CRC_14B) |
7491 | crc_val = LCB_CRC_14B; |
7492 | else if (crc_mask & CAP_CRC_48B) |
7493 | crc_val = LCB_CRC_48B; |
7494 | else if (crc_mask & CAP_CRC_12B_16B_PER_LANE) |
7495 | crc_val = LCB_CRC_12B_16B_PER_LANE; |
7496 | else |
7497 | crc_val = LCB_CRC_16B; |
7498 | |
7499 | dd_dev_info(dd, "Final LCB CRC mode: %d\n" , (int)crc_val); |
7500 | write_csr(dd, DC_LCB_CFG_CRC_MODE, |
7501 | value: (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT); |
7502 | |
7503 | /* set (14b only) or clear sideband credit */ |
7504 | reg = read_csr(dd, SEND_CM_CTRL); |
7505 | if (crc_val == LCB_CRC_14B && crc_14b_sideband) { |
		write_csr(dd, SEND_CM_CTRL,
			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7511 | } |
7512 | |
7513 | ppd->link_speed_active = 0; /* invalid value */ |
7514 | if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { |
7515 | /* remote_tx_rate: 0 = 12.5G, 1 = 25G */ |
7516 | switch (remote_tx_rate) { |
7517 | case 0: |
7518 | ppd->link_speed_active = OPA_LINK_SPEED_12_5G; |
7519 | break; |
7520 | case 1: |
7521 | ppd->link_speed_active = OPA_LINK_SPEED_25G; |
7522 | break; |
7523 | } |
7524 | } else { |
7525 | /* actual rate is highest bit of the ANDed rates */ |
7526 | u8 rate = remote_tx_rate & ppd->local_tx_rate; |
7527 | |
7528 | if (rate & 2) |
7529 | ppd->link_speed_active = OPA_LINK_SPEED_25G; |
7530 | else if (rate & 1) |
7531 | ppd->link_speed_active = OPA_LINK_SPEED_12_5G; |
7532 | } |
7533 | if (ppd->link_speed_active == 0) { |
7534 | dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n" , |
7535 | __func__, (int)remote_tx_rate); |
7536 | ppd->link_speed_active = OPA_LINK_SPEED_25G; |
7537 | } |
7538 | |
7539 | /* |
7540 | * Cache the values of the supported, enabled, and active |
7541 | * LTP CRC modes to return in 'portinfo' queries. But the bit |
7542 | * flags that are returned in the portinfo query differ from |
7543 | * what's in the link_crc_mask, crc_sizes, and crc_val |
7544 | * variables. Convert these here. |
7545 | */ |
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
	/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
	/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7552 | /* active crc mode */ |
7553 | |
7554 | /* set up the remote credit return table */ |
7555 | assign_remote_cm_au_table(dd, vcu); |
7556 | |
7557 | /* |
7558 | * The LCB is reset on entry to handle_verify_cap(), so this must |
7559 | * be applied on every link up. |
7560 | * |
7561 | * Adjust LCB error kill enable to kill the link if |
7562 | * these RBUF errors are seen: |
7563 | * REPLAY_BUF_MBE_SMASK |
7564 | * FLIT_INPUT_BUF_MBE_SMASK |
7565 | */ |
7566 | if (is_ax(dd)) { /* fixed in B0 */ |
7567 | reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); |
7568 | reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK |
7569 | | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK; |
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7571 | } |
7572 | |
7573 | /* pull LCB fifos out of reset - all fifo clocks must be stable */ |
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7575 | |
7576 | /* give 8051 access to the LCB CSRs */ |
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7578 | set_8051_lcb_access(dd); |
7579 | |
7580 | /* tell the 8051 to go to LinkUp */ |
7581 | set_link_state(ppd, HLS_GOING_UP); |
7582 | } |
7583 | |
7584 | /** |
7585 | * apply_link_downgrade_policy - Apply the link width downgrade enabled |
7586 | * policy against the current active link widths. |
7587 | * @ppd: info of physical Hfi port |
7588 | * @refresh_widths: True indicates link downgrade event |
7589 | * @return: True indicates a successful link downgrade. False indicates |
7590 | * link downgrade event failed and the link will bounce back to |
7591 | * default link width. |
7592 | * |
7593 | * Called when the enabled policy changes or the active link widths |
7594 | * change. |
7595 | * Refresh_widths indicates that a link downgrade occurred. The |
7596 | * link_downgraded variable is set by refresh_widths and |
7597 | * determines the success/failure of the policy application. |
7598 | */ |
7599 | bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, |
7600 | bool refresh_widths) |
7601 | { |
7602 | int do_bounce = 0; |
7603 | int tries; |
7604 | u16 lwde; |
7605 | u16 tx, rx; |
7606 | bool link_downgraded = refresh_widths; |
7607 | |
7608 | /* use the hls lock to avoid a race with actual link up */ |
7609 | tries = 0; |
7610 | retry: |
7611 | mutex_lock(&ppd->hls_lock); |
7612 | /* only apply if the link is up */ |
7613 | if (ppd->host_link_state & HLS_DOWN) { |
		/* still going up... wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120); /* arbitrary */
7619 | goto retry; |
7620 | } |
7621 | dd_dev_err(ppd->dd, |
7622 | "%s: giving up waiting for link state change\n" , |
7623 | __func__); |
7624 | } |
7625 | goto done; |
7626 | } |
7627 | |
7628 | lwde = ppd->link_width_downgrade_enabled; |
7629 | |
7630 | if (refresh_widths) { |
		get_link_widths(ppd->dd, &tx, &rx);
7632 | ppd->link_width_downgrade_tx_active = tx; |
7633 | ppd->link_width_downgrade_rx_active = rx; |
7634 | } |
7635 | |
7636 | if (ppd->link_width_downgrade_tx_active == 0 || |
7637 | ppd->link_width_downgrade_rx_active == 0) { |
7638 | /* the 8051 reported a dead link as a downgrade */ |
7639 | dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n" ); |
7640 | link_downgraded = false; |
7641 | } else if (lwde == 0) { |
7642 | /* downgrade is disabled */ |
7643 | |
7644 | /* bounce if not at starting active width */ |
7645 | if ((ppd->link_width_active != |
7646 | ppd->link_width_downgrade_tx_active) || |
7647 | (ppd->link_width_active != |
7648 | ppd->link_width_downgrade_rx_active)) { |
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7653 | ppd->link_width_active, |
7654 | ppd->link_width_downgrade_tx_active, |
7655 | ppd->link_width_downgrade_rx_active); |
7656 | do_bounce = 1; |
7657 | link_downgraded = false; |
7658 | } |
7659 | } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || |
7660 | (lwde & ppd->link_width_downgrade_rx_active) == 0) { |
7661 | /* Tx or Rx is outside the enabled policy */ |
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7666 | lwde, ppd->link_width_downgrade_tx_active, |
7667 | ppd->link_width_downgrade_rx_active); |
7668 | do_bounce = 1; |
7669 | link_downgraded = false; |
7670 | } |
7671 | |
7672 | done: |
	mutex_unlock(&ppd->hls_lock);
7674 | |
7675 | if (do_bounce) { |
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7677 | OPA_LINKDOWN_REASON_WIDTH_POLICY); |
7678 | set_link_state(ppd, HLS_DN_OFFLINE); |
7679 | start_link(ppd); |
7680 | } |
7681 | |
7682 | return link_downgraded; |
7683 | } |
7684 | |
7685 | /* |
7686 | * Handle a link downgrade interrupt from the 8051. |
7687 | * |
7688 | * This is a work-queue function outside of the interrupt. |
7689 | */ |
7690 | void handle_link_downgrade(struct work_struct *work) |
7691 | { |
7692 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
7693 | link_downgrade_work); |
7694 | |
7695 | dd_dev_info(ppd->dd, "8051: Link width downgrade\n" ); |
7696 | if (apply_link_downgrade_policy(ppd, refresh_widths: true)) |
7697 | update_xmit_counters(ppd, link_width: ppd->link_width_downgrade_tx_active); |
7698 | } |
7699 | |
7700 | static char *dcc_err_string(char *buf, int buf_len, u64 flags) |
7701 | { |
	return flag_string(buf, buf_len, flags, dcc_err_flags,
7703 | ARRAY_SIZE(dcc_err_flags)); |
7704 | } |
7705 | |
7706 | static char *lcb_err_string(char *buf, int buf_len, u64 flags) |
7707 | { |
	return flag_string(buf, buf_len, flags, lcb_err_flags,
7709 | ARRAY_SIZE(lcb_err_flags)); |
7710 | } |
7711 | |
7712 | static char *dc8051_err_string(char *buf, int buf_len, u64 flags) |
7713 | { |
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
7715 | ARRAY_SIZE(dc8051_err_flags)); |
7716 | } |
7717 | |
7718 | static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags) |
7719 | { |
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7721 | ARRAY_SIZE(dc8051_info_err_flags)); |
7722 | } |
7723 | |
7724 | static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags) |
7725 | { |
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7727 | ARRAY_SIZE(dc8051_info_host_msg_flags)); |
7728 | } |
7729 | |
7730 | static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) |
7731 | { |
7732 | struct hfi1_pportdata *ppd = dd->pport; |
7733 | u64 info, err, host_msg; |
7734 | int queue_link_down = 0; |
7735 | char buf[96]; |
7736 | |
7737 | /* look at the flags */ |
7738 | if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) { |
7739 | /* 8051 information set by firmware */ |
7740 | /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */ |
7741 | info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); |
7742 | err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT) |
7743 | & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK; |
7744 | host_msg = (info >> |
7745 | DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT) |
7746 | & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK; |
7747 | |
7748 | /* |
7749 | * Handle error flags. |
7750 | */ |
7751 | if (err & FAILED_LNI) { |
7752 | /* |
7753 | * LNI error indications are cleared by the 8051 |
7754 | * only when starting polling. Only pay attention |
7755 | * to them when in the states that occur during |
7756 | * LNI. |
7757 | */ |
7758 | if (ppd->host_link_state |
7759 | & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { |
7760 | queue_link_down = 1; |
7761 | dd_dev_info(dd, "Link error: %s\n" , |
7762 | dc8051_info_err_string(buf, |
7763 | sizeof(buf), |
7764 | err & |
7765 | FAILED_LNI)); |
7766 | } |
7767 | err &= ~(u64)FAILED_LNI; |
7768 | } |
		/* unknown frames can happen during LNI, just count */
7770 | if (err & UNKNOWN_FRAME) { |
7771 | ppd->unknown_frame_count++; |
7772 | err &= ~(u64)UNKNOWN_FRAME; |
7773 | } |
7774 | if (err) { |
7775 | /* report remaining errors, but do not do anything */ |
7776 | dd_dev_err(dd, "8051 info error: %s\n" , |
7777 | dc8051_info_err_string(buf, sizeof(buf), |
7778 | err)); |
7779 | } |
7780 | |
7781 | /* |
7782 | * Handle host message flags. |
7783 | */ |
7784 | if (host_msg & HOST_REQ_DONE) { |
7785 | /* |
7786 | * Presently, the driver does a busy wait for |
7787 | * host requests to complete. This is only an |
7788 | * informational message. |
7789 | * NOTE: The 8051 clears the host message |
7790 | * information *on the next 8051 command*. |
7791 | * Therefore, when linkup is achieved, |
7792 | * this flag will still be set. |
7793 | */ |
7794 | host_msg &= ~(u64)HOST_REQ_DONE; |
7795 | } |
7796 | if (host_msg & BC_SMA_MSG) { |
7797 | queue_work(wq: ppd->link_wq, work: &ppd->sma_message_work); |
7798 | host_msg &= ~(u64)BC_SMA_MSG; |
7799 | } |
7800 | if (host_msg & LINKUP_ACHIEVED) { |
7801 | dd_dev_info(dd, "8051: Link up\n" ); |
7802 | queue_work(wq: ppd->link_wq, work: &ppd->link_up_work); |
7803 | host_msg &= ~(u64)LINKUP_ACHIEVED; |
7804 | } |
7805 | if (host_msg & EXT_DEVICE_CFG_REQ) { |
7806 | handle_8051_request(ppd); |
7807 | host_msg &= ~(u64)EXT_DEVICE_CFG_REQ; |
7808 | } |
7809 | if (host_msg & VERIFY_CAP_FRAME) { |
7810 | queue_work(wq: ppd->link_wq, work: &ppd->link_vc_work); |
7811 | host_msg &= ~(u64)VERIFY_CAP_FRAME; |
7812 | } |
7813 | if (host_msg & LINK_GOING_DOWN) { |
7814 | const char * = "" ; |
7815 | /* no downgrade action needed if going down */ |
7816 | if (host_msg & LINK_WIDTH_DOWNGRADED) { |
7817 | host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; |
7818 | extra = " (ignoring downgrade)" ; |
7819 | } |
7820 | dd_dev_info(dd, "8051: Link down%s\n" , extra); |
7821 | queue_link_down = 1; |
7822 | host_msg &= ~(u64)LINK_GOING_DOWN; |
7823 | } |
7824 | if (host_msg & LINK_WIDTH_DOWNGRADED) { |
7825 | queue_work(wq: ppd->link_wq, work: &ppd->link_downgrade_work); |
7826 | host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; |
7827 | } |
7828 | if (host_msg) { |
7829 | /* report remaining messages, but do not do anything */ |
7830 | dd_dev_info(dd, "8051 info host message: %s\n" , |
7831 | dc8051_info_host_msg_string(buf, |
7832 | sizeof(buf), |
7833 | host_msg)); |
7834 | } |
7835 | |
7836 | reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK; |
7837 | } |
7838 | if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) { |
7839 | /* |
7840 | * Lost the 8051 heartbeat. If this happens, we |
7841 | * receive constant interrupts about it. Disable |
7842 | * the interrupt after the first. |
7843 | */ |
7844 | dd_dev_err(dd, "Lost 8051 heartbeat\n" ); |
7845 | write_csr(dd, DC_DC8051_ERR_EN, |
7846 | value: read_csr(dd, DC_DC8051_ERR_EN) & |
7847 | ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); |
7848 | |
7849 | reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK; |
7850 | } |
7851 | if (reg) { |
7852 | /* report the error, but do not do anything */ |
7853 | dd_dev_err(dd, "8051 error: %s\n" , |
7854 | dc8051_err_string(buf, sizeof(buf), reg)); |
7855 | } |
7856 | |
7857 | if (queue_link_down) { |
7858 | /* |
7859 | * if the link is already going down or disabled, do not |
7860 | * queue another. If there's a link down entry already |
7861 | * queued, don't queue another one. |
7862 | */ |
7863 | if ((ppd->host_link_state & |
7864 | (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || |
7865 | ppd->link_enabled == 0) { |
7866 | dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n" , |
7867 | __func__, ppd->host_link_state, |
7868 | ppd->link_enabled); |
7869 | } else { |
7870 | if (xchg(&ppd->is_link_down_queued, 1) == 1) |
7871 | dd_dev_info(dd, |
7872 | "%s: link down request already queued\n" , |
7873 | __func__); |
7874 | else |
7875 | queue_work(wq: ppd->link_wq, work: &ppd->link_down_work); |
7876 | } |
7877 | } |
7878 | } |
7879 | |
static const char * const fm_config_txt[] = {
	[0] =
	    "BadHeadDist: Distance violation between two head flits",
	[1] =
	    "BadTailDist: Distance violation between two tail flits",
	[2] =
	    "BadCtrlDist: Distance violation between two credit control flits",
	[3] =
	    "BadCrdAck: Credits return for unsupported VL",
	[4] =
	    "UnsupportedVLMarker: Received VL Marker",
	[5] =
	    "BadPreempt: Exceeded the preemption nesting level",
	[6] =
	    "BadControlFlit: Received unsupported control flit",
	/* no 7 */
	[8] =
	    "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};
7899 | |
static const char * const port_rcv_txt[] = {
	[1] =
	    "BadPktLen: Illegal PktLen",
	[2] =
	    "PktLenTooLong: Packet longer than PktLen",
	[3] =
	    "PktLenTooShort: Packet shorter than PktLen",
	[4] =
	    "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
	[5] =
	    "BadDLID: Illegal DLID (0, doesn't match HFI)",
	[6] =
	    "BadL2: Illegal L2 opcode",
	[7] =
	    "BadSC: Unsupported SC",
	[9] =
	    "BadRC: Illegal RC",
	[11] =
	    "PreemptError: Preempting with same VL",
	[12] =
	    "PreemptVL15: Preempting a VL15 packet",
};
7922 | |
7923 | #define OPA_LDR_FMCONFIG_OFFSET 16 |
7924 | #define OPA_LDR_PORTRCV_OFFSET 0 |
7925 | static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
7926 | { |
7927 | u64 info, hdr0, hdr1; |
	const char *extra;
7929 | char buf[96]; |
7930 | struct hfi1_pportdata *ppd = dd->pport; |
7931 | u8 lcl_reason = 0; |
7932 | int do_bounce = 0; |
7933 | |
7934 | if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) { |
7935 | if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { |
7936 | info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); |
7937 | dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; |
7938 | /* set status bit */ |
7939 | dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; |
7940 | } |
7941 | reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK; |
7942 | } |
7943 | |
7944 | if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) { |
7945 | struct hfi1_pportdata *ppd = dd->pport; |
7946 | /* this counter saturates at (2^32) - 1 */ |
7947 | if (ppd->link_downed < (u32)UINT_MAX) |
7948 | ppd->link_downed++; |
7949 | reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK; |
7950 | } |
7951 | |
7952 | if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) { |
7953 | u8 reason_valid = 1; |
7954 | |
7955 | info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); |
7956 | if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { |
7957 | dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; |
7958 | /* set status bit */ |
7959 | dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; |
7960 | } |
7961 | switch (info) { |
7962 | case 0: |
7963 | case 1: |
7964 | case 2: |
7965 | case 3: |
7966 | case 4: |
7967 | case 5: |
7968 | case 6: |
7969 | extra = fm_config_txt[info]; |
7970 | break; |
7971 | case 8: |
7972 | extra = fm_config_txt[info]; |
7973 | if (ppd->port_error_action & |
7974 | OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) { |
7975 | do_bounce = 1; |
7976 | /* |
7977 | * lcl_reason cannot be derived from info |
7978 | * for this error |
7979 | */ |
7980 | lcl_reason = |
7981 | OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER; |
7982 | } |
7983 | break; |
7984 | default: |
7985 | reason_valid = 0; |
			snprintf(buf, sizeof(buf), "reserved%lld", info);
7987 | extra = buf; |
7988 | break; |
7989 | } |
7990 | |
7991 | if (reason_valid && !do_bounce) { |
7992 | do_bounce = ppd->port_error_action & |
7993 | (1 << (OPA_LDR_FMCONFIG_OFFSET + info)); |
7994 | lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST; |
7995 | } |
7996 | |
7997 | /* just report this */ |
7998 | dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n" , |
7999 | extra); |
8000 | reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK; |
8001 | } |
8002 | |
8003 | if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) { |
8004 | u8 reason_valid = 1; |
8005 | |
8006 | info = read_csr(dd, DCC_ERR_INFO_PORTRCV); |
8007 | hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); |
8008 | hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); |
8009 | if (!(dd->err_info_rcvport.status_and_code & |
8010 | OPA_EI_STATUS_SMASK)) { |
8011 | dd->err_info_rcvport.status_and_code = |
8012 | info & OPA_EI_CODE_SMASK; |
8013 | /* set status bit */ |
8014 | dd->err_info_rcvport.status_and_code |= |
8015 | OPA_EI_STATUS_SMASK; |
8016 | /* |
8017 | * save first 2 flits in the packet that caused |
8018 | * the error |
8019 | */ |
8020 | dd->err_info_rcvport.packet_flit1 = hdr0; |
8021 | dd->err_info_rcvport.packet_flit2 = hdr1; |
8022 | } |
8023 | switch (info) { |
8024 | case 1: |
8025 | case 2: |
8026 | case 3: |
8027 | case 4: |
8028 | case 5: |
8029 | case 6: |
8030 | case 7: |
8031 | case 9: |
8032 | case 11: |
8033 | case 12: |
8034 | extra = port_rcv_txt[info]; |
8035 | break; |
8036 | default: |
8037 | reason_valid = 0; |
			snprintf(buf, sizeof(buf), "reserved%lld", info);
8039 | extra = buf; |
8040 | break; |
8041 | } |
8042 | |
8043 | if (reason_valid && !do_bounce) { |
8044 | do_bounce = ppd->port_error_action & |
8045 | (1 << (OPA_LDR_PORTRCV_OFFSET + info)); |
8046 | lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0; |
8047 | } |
8048 | |
8049 | /* just report this */ |
8050 | dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" |
8051 | " hdr0 0x%llx, hdr1 0x%llx\n" , |
8052 | extra, hdr0, hdr1); |
8053 | |
8054 | reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK; |
8055 | } |
8056 | |
8057 | if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) { |
8058 | /* informative only */ |
8059 | dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n" ); |
8060 | reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK; |
8061 | } |
8062 | if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) { |
8063 | /* informative only */ |
8064 | dd_dev_info_ratelimited(dd, "host access to LCB blocked\n" ); |
8065 | reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK; |
8066 | } |
8067 | |
8068 | if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) |
8069 | reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK; |
8070 | |
8071 | /* report any remaining errors */ |
8072 | if (reg) |
8073 | dd_dev_info_ratelimited(dd, "DCC Error: %s\n" , |
8074 | dcc_err_string(buf, sizeof(buf), reg)); |
8075 | |
8076 | if (lcl_reason == 0) |
8077 | lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN; |
8078 | |
8079 | if (do_bounce) { |
8080 | dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n" , |
8081 | __func__); |
8082 | set_link_down_reason(ppd, lcl_reason, neigh_reason: 0, rem_reason: lcl_reason); |
8083 | queue_work(wq: ppd->link_wq, work: &ppd->link_bounce_work); |
8084 | } |
8085 | } |
8086 | |
8087 | static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) |
8088 | { |
8089 | char buf[96]; |
8090 | |
8091 | dd_dev_info(dd, "LCB Error: %s\n" , |
8092 | lcb_err_string(buf, sizeof(buf), reg)); |
8093 | } |
8094 | |
8095 | /* |
8096 | * CCE block DC interrupt. Source is < 8. |
8097 | */ |
8098 | static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) |
8099 | { |
8100 | const struct err_reg_info *eri = &dc_errs[source]; |
8101 | |
8102 | if (eri->handler) { |
		interrupt_clear_down(dd, 0, eri);
8104 | } else if (source == 3 /* dc_lbm_int */) { |
8105 | /* |
8106 | * This indicates that a parity error has occurred on the |
8107 | * address/control lines presented to the LBM. The error |
8108 | * is a single pulse, there is no associated error flag, |
8109 | * and it is non-maskable. This is because if a parity |
8110 | * error occurs on the request the request is dropped. |
8111 | * This should never occur, but it is nice to know if it |
8112 | * ever does. |
8113 | */ |
8114 | dd_dev_err(dd, "Parity error in DC LBM block\n" ); |
8115 | } else { |
8116 | dd_dev_err(dd, "Invalid DC interrupt %u\n" , source); |
8117 | } |
8118 | } |
8119 | |
8120 | /* |
8121 | * TX block send credit interrupt. Source is < 160. |
8122 | */ |
8123 | static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) |
8124 | { |
	sc_group_release_update(dd, source);
8126 | } |
8127 | |
8128 | /* |
8129 | * TX block SDMA interrupt. Source is < 48. |
8130 | * |
8131 | * SDMA interrupts are grouped by type: |
8132 | * |
8133 | * 0 - N-1 = SDma |
8134 | * N - 2N-1 = SDmaProgress |
8135 | * 2N - 3N-1 = SDmaIdle |
8136 | */ |
8137 | static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) |
8138 | { |
8139 | /* what interrupt */ |
8140 | unsigned int what = source / TXE_NUM_SDMA_ENGINES; |
8141 | /* which engine */ |
8142 | unsigned int which = source % TXE_NUM_SDMA_ENGINES; |
8143 | |
8144 | #ifdef CONFIG_SDMA_VERBOSITY |
8145 | dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n" , which, |
8146 | slashstrip(__FILE__), __LINE__, __func__); |
8147 | sdma_dumpstate(&dd->per_sdma[which]); |
8148 | #endif |
8149 | |
8150 | if (likely(what < 3 && which < dd->num_sdma)) { |
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8155 | } |
8156 | } |
8157 | |
8158 | /** |
8159 | * is_rcv_avail_int() - User receive context available IRQ handler |
8160 | * @dd: valid dd |
8161 | * @source: logical IRQ source (offset from IS_RCVAVAIL_START) |
8162 | * |
8163 | * RX block receive available interrupt. Source is < 160. |
8164 | * |
8165 | * This is the general interrupt handler for user (PSM) receive contexts, |
8166 | * and can only be used for non-threaded IRQs. |
8167 | */ |
8168 | static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) |
8169 | { |
8170 | struct hfi1_ctxtdata *rcd; |
8171 | char *err_detail; |
8172 | |
8173 | if (likely(source < dd->num_rcv_contexts)) { |
		rcd = hfi1_rcd_get_by_index(dd, source);
8175 | if (rcd) { |
8176 | handle_user_interrupt(rcd); |
8177 | hfi1_rcd_put(rcd); |
8178 | return; /* OK */ |
8179 | } |
8180 | /* received an interrupt, but no rcd */ |
8181 | err_detail = "dataless" ; |
8182 | } else { |
8183 | /* received an interrupt, but are not using that context */ |
8184 | err_detail = "out of range" ; |
8185 | } |
8186 | dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n" , |
8187 | err_detail, source); |
8188 | } |
8189 | |
8190 | /** |
8191 | * is_rcv_urgent_int() - User receive context urgent IRQ handler |
8192 | * @dd: valid dd |
8193 | * @source: logical IRQ source (offset from IS_RCVURGENT_START) |
8194 | * |
8195 | * RX block receive urgent interrupt. Source is < 160. |
8196 | * |
8197 | * NOTE: kernel receive contexts specifically do NOT enable this IRQ. |
8198 | */ |
8199 | static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) |
8200 | { |
8201 | struct hfi1_ctxtdata *rcd; |
8202 | char *err_detail; |
8203 | |
8204 | if (likely(source < dd->num_rcv_contexts)) { |
		rcd = hfi1_rcd_get_by_index(dd, source);
8206 | if (rcd) { |
8207 | handle_user_interrupt(rcd); |
8208 | hfi1_rcd_put(rcd); |
8209 | return; /* OK */ |
8210 | } |
8211 | /* received an interrupt, but no rcd */ |
8212 | err_detail = "dataless" ; |
8213 | } else { |
8214 | /* received an interrupt, but are not using that context */ |
8215 | err_detail = "out of range" ; |
8216 | } |
8217 | dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n" , |
8218 | err_detail, source); |
8219 | } |
8220 | |
8221 | /* |
8222 | * Reserved range interrupt. Should not be called in normal operation. |
8223 | */ |
8224 | static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) |
8225 | { |
8226 | char name[64]; |
8227 | |
8228 | dd_dev_err(dd, "unexpected %s interrupt\n" , |
8229 | is_reserved_name(name, sizeof(name), source)); |
8230 | } |
8231 | |
8232 | static const struct is_table is_table[] = { |
8233 | /* |
8234 | * start end |
8235 | * name func interrupt func |
8236 | */ |
8237 | { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, |
8238 | is_misc_err_name, is_misc_err_int }, |
8239 | { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, |
8240 | is_sdma_eng_err_name, is_sdma_eng_err_int }, |
8241 | { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, |
8242 | is_sendctxt_err_name, is_sendctxt_err_int }, |
8243 | { IS_SDMA_START, IS_SDMA_IDLE_END, |
8244 | is_sdma_eng_name, is_sdma_eng_int }, |
8245 | { IS_VARIOUS_START, IS_VARIOUS_END, |
8246 | is_various_name, is_various_int }, |
8247 | { IS_DC_START, IS_DC_END, |
8248 | is_dc_name, is_dc_int }, |
8249 | { IS_RCVAVAIL_START, IS_RCVAVAIL_END, |
8250 | is_rcv_avail_name, is_rcv_avail_int }, |
8251 | { IS_RCVURGENT_START, IS_RCVURGENT_END, |
8252 | is_rcv_urgent_name, is_rcv_urgent_int }, |
8253 | { IS_SENDCREDIT_START, IS_SENDCREDIT_END, |
8254 | is_send_credit_name, is_send_credit_int}, |
8255 | { IS_RESERVED_START, IS_RESERVED_END, |
8256 | is_reserved_name, is_reserved_int}, |
8257 | }; |
8258 | |
8259 | /* |
8260 | * Interrupt source interrupt - called when the given source has an interrupt. |
8261 | * Source is a bit index into an array of 64-bit integers. |
8262 | */ |
8263 | static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) |
8264 | { |
8265 | const struct is_table *entry; |
8266 | |
8267 | /* avoids a double compare by walking the table in-order */ |
8268 | for (entry = &is_table[0]; entry->is_name; entry++) { |
8269 | if (source <= entry->end) { |
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
8272 | return; |
8273 | } |
8274 | } |
8275 | /* fell off the end */ |
8276 | dd_dev_err(dd, "invalid interrupt source %u\n" , source); |
8277 | } |
8278 | |
8279 | /** |
8280 | * general_interrupt - General interrupt handler |
8281 | * @irq: MSIx IRQ vector |
8282 | * @data: hfi1 devdata |
8283 | * |
8284 | * This is able to correctly handle all non-threaded interrupts. Receive |
8285 | * context DATA IRQs are threaded and are not supported by this handler. |
8286 | * |
8287 | */ |
8288 | irqreturn_t general_interrupt(int irq, void *data) |
8289 | { |
8290 | struct hfi1_devdata *dd = data; |
8291 | u64 regs[CCE_NUM_INT_CSRS]; |
8292 | u32 bit; |
8293 | int i; |
8294 | irqreturn_t handled = IRQ_NONE; |
8295 | |
8296 | this_cpu_inc(*dd->int_counter); |
8297 | |
8298 | /* phase 1: scan and clear all handled interrupts */ |
8299 | for (i = 0; i < CCE_NUM_INT_CSRS; i++) { |
8300 | if (dd->gi_mask[i] == 0) { |
8301 | regs[i] = 0; /* used later */ |
8302 | continue; |
8303 | } |
8304 | regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & |
8305 | dd->gi_mask[i]; |
8306 | /* only clear if anything is set */ |
8307 | if (regs[i]) |
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8309 | } |
8310 | |
8311 | /* phase 2: call the appropriate handler */ |
8312 | for_each_set_bit(bit, (unsigned long *)®s[0], |
8313 | CCE_NUM_INT_CSRS * 64) { |
		is_interrupt(dd, bit);
8315 | handled = IRQ_HANDLED; |
8316 | } |
8317 | |
8318 | return handled; |
8319 | } |
8320 | |
8321 | irqreturn_t sdma_interrupt(int irq, void *data) |
8322 | { |
8323 | struct sdma_engine *sde = data; |
8324 | struct hfi1_devdata *dd = sde->dd; |
8325 | u64 status; |
8326 | |
8327 | #ifdef CONFIG_SDMA_VERBOSITY |
8328 | dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n" , sde->this_idx, |
8329 | slashstrip(__FILE__), __LINE__, __func__); |
8330 | sdma_dumpstate(sde); |
8331 | #endif |
8332 | |
8333 | this_cpu_inc(*dd->int_counter); |
8334 | |
8335 | /* This read_csr is really bad in the hot path */ |
8336 | status = read_csr(dd, |
8337 | CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) |
8338 | & sde->imask; |
8339 | if (likely(status)) { |
8340 | /* clear the interrupt(s) */ |
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);
8344 | |
8345 | /* handle the interrupt(s) */ |
8346 | sdma_engine_interrupt(sde, status); |
8347 | } else { |
8348 | dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n" , |
8349 | sde->this_idx); |
8350 | } |
8351 | return IRQ_HANDLED; |
8352 | } |
8353 | |
8354 | /* |
8355 | * Clear the receive interrupt. Use a read of the interrupt clear CSR |
 * to ensure that the write completed. This does NOT guarantee that
8357 | * queued DMA writes to memory from the chip are pushed. |
8358 | */ |
8359 | static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) |
8360 | { |
8361 | struct hfi1_devdata *dd = rcd->dd; |
8362 | u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); |
8363 | |
	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
8367 | } |
8368 | |
8369 | /* force the receive interrupt */ |
8370 | void force_recv_intr(struct hfi1_ctxtdata *rcd) |
8371 | { |
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8373 | } |
8374 | |
8375 | /* |
8376 | * Return non-zero if a packet is present. |
8377 | * |
8378 | * This routine is called when rechecking for packets after the RcvAvail |
8379 | * interrupt has been cleared down. First, do a quick check of memory for |
8380 | * a packet present. If not found, use an expensive CSR read of the context |
8381 | * tail to determine the actual tail. The CSR read is necessary because there |
8382 | * is no method to push pending DMAs to memory other than an interrupt and we |
8383 | * are trying to determine if we need to force an interrupt. |
8384 | */ |
8385 | static inline int check_packet_present(struct hfi1_ctxtdata *rcd) |
8386 | { |
8387 | u32 tail; |
8388 | |
8389 | if (hfi1_packet_present(rcd)) |
8390 | return 1; |
8391 | |
	/* fall back to a CSR read, correct independent of DMA_RTAIL */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8394 | return hfi1_rcd_head(rcd) != tail; |
8395 | } |
8396 | |
8397 | /* |
8398 | * Common code for receive contexts interrupt handlers. |
8399 | * Update traces, increment kernel IRQ counter and |
8400 | * setup ASPM when needed. |
8401 | */ |
8402 | static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) |
8403 | { |
8404 | struct hfi1_devdata *dd = rcd->dd; |
8405 | |
8406 | trace_hfi1_receive_interrupt(dd, rcd); |
8407 | this_cpu_inc(*dd->int_counter); |
8408 | aspm_ctx_disable(rcd); |
8409 | } |
8410 | |
8411 | /* |
8412 | * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt |
8413 | * when there are packets present in the queue. When calling |
8414 | * with interrupts enabled please use hfi1_rcd_eoi_intr. |
8415 | * |
8416 | * @rcd: valid receive context |
8417 | */ |
8418 | static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) |
8419 | { |
8420 | if (!rcd->rcvhdrq) |
8421 | return; |
8422 | clear_recv_intr(rcd); |
8423 | if (check_packet_present(rcd)) |
8424 | force_recv_intr(rcd); |
8425 | } |
8426 | |
8427 | /** |
8428 | * hfi1_rcd_eoi_intr() - End of Interrupt processing action |
8429 | * |
8430 | * @rcd: Ptr to hfi1_ctxtdata of receive context |
8431 | * |
8432 | * Hold IRQs so we can safely clear the interrupt and |
8433 | * recheck for a packet that may have arrived after the previous |
8434 | * check and the interrupt clear. If a packet arrived, force another |
8435 | * interrupt. This routine can be called at the end of receive packet |
8436 | * processing in interrupt service routines, interrupt service thread |
8437 | * and softirqs |
8438 | */ |
8439 | static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) |
8440 | { |
8441 | unsigned long flags; |
8442 | |
8443 | local_irq_save(flags); |
8444 | __hfi1_rcd_eoi_intr(rcd); |
8445 | local_irq_restore(flags); |
8446 | } |
8447 | |
8448 | /** |
8449 | * hfi1_netdev_rx_napi - napi poll function to move eoi inline |
8450 | * @napi: pointer to napi object |
8451 | * @budget: netdev budget |
8452 | */ |
8453 | int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget) |
8454 | { |
8455 | struct hfi1_netdev_rxq *rxq = container_of(napi, |
8456 | struct hfi1_netdev_rxq, napi); |
8457 | struct hfi1_ctxtdata *rcd = rxq->rcd; |
8458 | int work_done = 0; |
8459 | |
8460 | work_done = rcd->do_interrupt(rcd, budget); |
8461 | |
8462 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
8464 | hfi1_rcd_eoi_intr(rcd); |
8465 | } |
8466 | |
8467 | return work_done; |
8468 | } |
8469 | |
8470 | /* Receive packet napi handler for netdevs VNIC and AIP */ |
8471 | irqreturn_t receive_context_interrupt_napi(int irq, void *data) |
8472 | { |
8473 | struct hfi1_ctxtdata *rcd = data; |
8474 | |
8475 | receive_interrupt_common(rcd); |
8476 | |
8477 | if (likely(rcd->napi)) { |
8478 | if (likely(napi_schedule_prep(rcd->napi))) |
			__napi_schedule_irqoff(rcd->napi);
8480 | else |
8481 | __hfi1_rcd_eoi_intr(rcd); |
8482 | } else { |
8483 | WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n" , |
8484 | rcd->ctxt); |
8485 | __hfi1_rcd_eoi_intr(rcd); |
8486 | } |
8487 | |
8488 | return IRQ_HANDLED; |
8489 | } |
8490 | |
8491 | /* |
8492 | * Receive packet IRQ handler. This routine expects to be on its own IRQ. |
8493 | * This routine will try to handle packets immediately (latency), but if |
 * it finds too many, it will invoke the thread handler (bandwidth). The
8495 | * chip receive interrupt is *not* cleared down until this or the thread (if |
8496 | * invoked) is finished. The intent is to avoid extra interrupts while we |
8497 | * are processing packets anyway. |
8498 | */ |
8499 | irqreturn_t receive_context_interrupt(int irq, void *data) |
8500 | { |
8501 | struct hfi1_ctxtdata *rcd = data; |
8502 | int disposition; |
8503 | |
8504 | receive_interrupt_common(rcd); |
8505 | |
8506 | /* receive interrupt remains blocked while processing packets */ |
8507 | disposition = rcd->do_interrupt(rcd, 0); |
8508 | |
8509 | /* |
8510 | * Too many packets were seen while processing packets in this |
8511 | * IRQ handler. Invoke the handler thread. The receive interrupt |
8512 | * remains blocked. |
8513 | */ |
8514 | if (disposition == RCV_PKT_LIMIT) |
8515 | return IRQ_WAKE_THREAD; |
8516 | |
8517 | __hfi1_rcd_eoi_intr(rcd); |
8518 | return IRQ_HANDLED; |
8519 | } |
8520 | |
8521 | /* |
8522 | * Receive packet thread handler. This expects to be invoked with the |
8523 | * receive interrupt still blocked. |
8524 | */ |
8525 | irqreturn_t receive_context_thread(int irq, void *data) |
8526 | { |
8527 | struct hfi1_ctxtdata *rcd = data; |
8528 | |
8529 | /* receive interrupt is still blocked from the IRQ handler */ |
8530 | (void)rcd->do_interrupt(rcd, 1); |
8531 | |
8532 | hfi1_rcd_eoi_intr(rcd); |
8533 | |
8534 | return IRQ_HANDLED; |
8535 | } |
8536 | |
8537 | /* ========================================================================= */ |
8538 | |
8539 | u32 read_physical_state(struct hfi1_devdata *dd) |
8540 | { |
8541 | u64 reg; |
8542 | |
8543 | reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); |
8544 | return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT) |
8545 | & DC_DC8051_STS_CUR_STATE_PORT_MASK; |
8546 | } |
8547 | |
8548 | u32 read_logical_state(struct hfi1_devdata *dd) |
8549 | { |
8550 | u64 reg; |
8551 | |
8552 | reg = read_csr(dd, DCC_CFG_PORT_CONFIG); |
8553 | return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT) |
8554 | & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK; |
8555 | } |
8556 | |
8557 | static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) |
8558 | { |
8559 | u64 reg; |
8560 | |
8561 | reg = read_csr(dd, DCC_CFG_PORT_CONFIG); |
8562 | /* clear current state, set new state */ |
8563 | reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK; |
8564 | reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT; |
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8566 | } |
8567 | |
8568 | /* |
8569 | * Use the 8051 to read a LCB CSR. |
8570 | */ |
8571 | static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) |
8572 | { |
8573 | u32 regno; |
8574 | int ret; |
8575 | |
8576 | if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { |
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
8580 | return 0; |
8581 | } |
8582 | return -EBUSY; |
8583 | } |
8584 | |
8585 | /* register is an index of LCB registers: (offset - base) / 8 */ |
8586 | regno = (addr - DC_LCB_CFG_RUN) >> 3; |
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8588 | if (ret != HCMD_SUCCESS) |
8589 | return -EBUSY; |
8590 | return 0; |
8591 | } |
8592 | |
8593 | /* |
8594 | * Provide a cache for some of the LCB registers in case the LCB is |
8595 | * unavailable. |
8596 | * (The LCB is unavailable in certain link states, for example.) |
8597 | */ |
8598 | struct lcb_datum { |
8599 | u32 off; |
8600 | u64 val; |
8601 | }; |
8602 | |
8603 | static struct lcb_datum lcb_cache[] = { |
8604 | { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0}, |
8605 | { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 }, |
8606 | { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 }, |
8607 | }; |
8608 | |
8609 | static void update_lcb_cache(struct hfi1_devdata *dd) |
8610 | { |
8611 | int i; |
8612 | int ret; |
8613 | u64 val; |
8614 | |
8615 | for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { |
		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8617 | |
8618 | /* Update if we get good data */ |
8619 | if (likely(ret != -EBUSY)) |
8620 | lcb_cache[i].val = val; |
8621 | } |
8622 | } |
8623 | |
8624 | static int read_lcb_cache(u32 off, u64 *val) |
8625 | { |
8626 | int i; |
8627 | |
8628 | for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { |
8629 | if (lcb_cache[i].off == off) { |
8630 | *val = lcb_cache[i].val; |
8631 | return 0; |
8632 | } |
8633 | } |
8634 | |
8635 | pr_warn("%s bad offset 0x%x\n" , __func__, off); |
8636 | return -1; |
8637 | } |
8638 | |
8639 | /* |
8640 | * Read an LCB CSR. Access may not be in host control, so check. |
8641 | * Return 0 on success, -EBUSY on failure. |
8642 | */ |
8643 | int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data) |
8644 | { |
8645 | struct hfi1_pportdata *ppd = dd->pport; |
8646 | |
8647 | /* if up, go through the 8051 for the value */ |
8648 | if (ppd->host_link_state & HLS_UP) |
8649 | return read_lcb_via_8051(dd, addr, data); |
8650 | /* if going up or down, check the cache, otherwise, no access */ |
8651 | if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { |
		if (read_lcb_cache(addr, data))
8653 | return -EBUSY; |
8654 | return 0; |
8655 | } |
8656 | |
8657 | /* otherwise, host has access */ |
	*data = read_csr(dd, addr);
8659 | return 0; |
8660 | } |
8661 | |
8662 | /* |
8663 | * Use the 8051 to write a LCB CSR. |
8664 | */ |
8665 | static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data) |
8666 | { |
8667 | u32 regno; |
8668 | int ret; |
8669 | |
8670 | if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || |
8671 | (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { |
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
8675 | return 0; |
8676 | } |
8677 | return -EBUSY; |
8678 | } |
8679 | |
8680 | /* register is an index of LCB registers: (offset - base) / 8 */ |
8681 | regno = (addr - DC_LCB_CFG_RUN) >> 3; |
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8683 | if (ret != HCMD_SUCCESS) |
8684 | return -EBUSY; |
8685 | return 0; |
8686 | } |
8687 | |
8688 | /* |
8689 | * Write an LCB CSR. Access may not be in host control, so check. |
8690 | * Return 0 on success, -EBUSY on failure. |
8691 | */ |
8692 | int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data) |
8693 | { |
8694 | struct hfi1_pportdata *ppd = dd->pport; |
8695 | |
8696 | /* if up, go through the 8051 for the value */ |
8697 | if (ppd->host_link_state & HLS_UP) |
8698 | return write_lcb_via_8051(dd, addr, data); |
8699 | /* if going up or down, no access */ |
8700 | if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) |
8701 | return -EBUSY; |
8702 | /* otherwise, host has access */ |
	write_csr(dd, addr, data);
8704 | return 0; |
8705 | } |
8706 | |
8707 | /* |
8708 | * Returns: |
8709 | * < 0 = Linux error, not able to get access |
8710 | * > 0 = 8051 command RETURN_CODE |
8711 | */ |
8712 | static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, |
8713 | u64 *out_data) |
8714 | { |
8715 | u64 reg, completed; |
8716 | int return_code; |
8717 | unsigned long timeout; |
8718 | |
8719 | hfi1_cdbg(DC8051, "type %d, data 0x%012llx" , type, in_data); |
8720 | |
8721 | mutex_lock(&dd->dc8051_lock); |
8722 | |
8723 | /* We can't send any commands to the 8051 if it's in reset */ |
8724 | if (dd->dc_shutdown) { |
8725 | return_code = -ENODEV; |
8726 | goto fail; |
8727 | } |
8728 | |
8729 | /* |
8730 | * If an 8051 host command timed out previously, then the 8051 is |
8731 | * stuck. |
8732 | * |
8733 | * On first timeout, attempt to reset and restart the entire DC |
8734 | * block (including 8051). (Is this too big of a hammer?) |
8735 | * |
8736 | * If the 8051 times out a second time, the reset did not bring it |
8737 | * back to healthy life. In that case, fail any subsequent commands. |
8738 | */ |
8739 | if (dd->dc8051_timed_out) { |
8740 | if (dd->dc8051_timed_out > 1) { |
8741 | dd_dev_err(dd, |
8742 | "Previous 8051 host command timed out, skipping command %u\n" , |
8743 | type); |
8744 | return_code = -ENXIO; |
8745 | goto fail; |
8746 | } |
8747 | _dc_shutdown(dd); |
8748 | _dc_start(dd); |
8749 | } |
8750 | |
8751 | /* |
8752 | * If there is no timeout, then the 8051 command interface is |
8753 | * waiting for a command. |
8754 | */ |
8755 | |
8756 | /* |
8757 | * When writing a LCB CSR, out_data contains the full value to |
8758 | * be written, while in_data contains the relative LCB |
	 * address in 7:0. Do the work of distributing the write data to
	 * where it needs to go here, rather than in the caller:
8761 | * |
8762 | * Write data |
8763 | * 39:00 -> in_data[47:8] |
8764 | * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE |
8765 | * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA |
8766 | */ |
8767 | if (type == HCMD_WRITE_LCB_CSR) { |
8768 | in_data |= ((*out_data) & 0xffffffffffull) << 8; |
8769 | /* must preserve COMPLETED - it is tied to hardware */ |
8770 | reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0); |
8771 | reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK; |
8772 | reg |= ((((*out_data) >> 40) & 0xff) << |
8773 | DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT) |
8774 | | ((((*out_data) >> 48) & 0xffff) << |
8775 | DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); |
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8777 | } |
8778 | |
8779 | /* |
8780 | * Do two writes: the first to stabilize the type and req_data, the |
8781 | * second to activate. |
8782 | */ |
8783 | reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK) |
8784 | << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT |
8785 | | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK) |
8786 | << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT; |
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8790 | |
8791 | /* wait for completion, alternate: interrupt */ |
8792 | timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT); |
8793 | while (1) { |
8794 | reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1); |
8795 | completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK; |
8796 | if (completed) |
8797 | break; |
8798 | if (time_after(jiffies, timeout)) { |
8799 | dd->dc8051_timed_out++; |
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
8801 | if (out_data) |
8802 | *out_data = 0; |
8803 | return_code = -ETIMEDOUT; |
8804 | goto fail; |
8805 | } |
8806 | udelay(2); |
8807 | } |
8808 | |
8809 | if (out_data) { |
8810 | *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT) |
8811 | & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK; |
8812 | if (type == HCMD_READ_LCB_CSR) { |
8813 | /* top 16 bits are in a different register */ |
8814 | *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1) |
8815 | & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK) |
8816 | << (48 |
8817 | - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); |
8818 | } |
8819 | } |
8820 | return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT) |
8821 | & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK; |
8822 | dd->dc8051_timed_out = 0; |
	/* Clear command for next user. */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8827 | |
8828 | fail: |
	mutex_unlock(&dd->dc8051_lock);
8830 | return return_code; |
8831 | } |
8832 | |
8833 | static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) |
8834 | { |
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8836 | } |
8837 | |
8838 | int load_8051_config(struct hfi1_devdata *dd, u8 field_id, |
8839 | u8 lane_id, u32 config_data) |
8840 | { |
8841 | u64 data; |
8842 | int ret; |
8843 | |
8844 | data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT |
8845 | | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT |
8846 | | (u64)config_data << LOAD_DATA_DATA_SHIFT; |
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
8851 | (int)field_id, (int)lane_id, ret); |
8852 | } |
8853 | return ret; |
8854 | } |
8855 | |
8856 | /* |
8857 | * Read the 8051 firmware "registers". Use the RAM directly. Always |
8858 | * set the result, even on error. |
8859 | * Return 0 on success, -errno on failure |
8860 | */ |
8861 | int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, |
8862 | u32 *result) |
8863 | { |
8864 | u64 big_data; |
8865 | u32 addr; |
8866 | int ret; |
8867 | |
8868 | /* address start depends on the lane_id */ |
8869 | if (lane_id < 4) |
8870 | addr = (4 * NUM_GENERAL_FIELDS) |
8871 | + (lane_id * 4 * NUM_LANE_FIELDS); |
8872 | else |
8873 | addr = 0; |
8874 | addr += field_id * 4; |
8875 | |
8876 | /* read is in 8-byte chunks, hardware will truncate the address down */ |
	ret = read_8051_data(dd, addr, 8, &big_data);
8878 | |
8879 | if (ret == 0) { |
8880 | /* extract the 4 bytes we want */ |
8881 | if (addr & 0x4) |
8882 | *result = (u32)(big_data >> 32); |
8883 | else |
8884 | *result = (u32)big_data; |
8885 | } else { |
8886 | *result = 0; |
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8888 | __func__, lane_id, field_id); |
8889 | } |
8890 | |
8891 | return ret; |
8892 | } |
8893 | |
8894 | static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, |
8895 | u8 continuous) |
8896 | { |
8897 | u32 frame; |
8898 | |
8899 | frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT |
8900 | | power_management << POWER_MANAGEMENT_SHIFT; |
8901 | return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, |
				GENERAL_CONFIG, frame);
8903 | } |
8904 | |
8905 | static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, |
8906 | u16 vl15buf, u8 crc_sizes) |
8907 | { |
8908 | u32 frame; |
8909 | |
8910 | frame = (u32)vau << VAU_SHIFT |
8911 | | (u32)z << Z_SHIFT |
8912 | | (u32)vcu << VCU_SHIFT |
8913 | | (u32)vl15buf << VL15BUF_SHIFT |
8914 | | (u32)crc_sizes << CRC_SIZES_SHIFT; |
8915 | return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, |
				GENERAL_CONFIG, frame);
8917 | } |
8918 | |
8919 | static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, |
8920 | u8 *flag_bits, u16 *link_widths) |
8921 | { |
8922 | u32 frame; |
8923 | |
8924 | read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, |
			 &frame);
8926 | *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; |
8927 | *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; |
8928 | *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; |
8929 | } |
8930 | |
8931 | static int write_vc_local_link_mode(struct hfi1_devdata *dd, |
8932 | u8 misc_bits, |
8933 | u8 flag_bits, |
8934 | u16 link_widths) |
8935 | { |
8936 | u32 frame; |
8937 | |
8938 | frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT |
8939 | | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT |
8940 | | (u32)link_widths << LINK_WIDTH_SHIFT; |
8941 | return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, |
				frame);
8943 | } |
8944 | |
8945 | static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, |
8946 | u8 device_rev) |
8947 | { |
8948 | u32 frame; |
8949 | |
8950 | frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT) |
8951 | | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT); |
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8953 | } |
8954 | |
8955 | static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, |
8956 | u8 *device_rev) |
8957 | { |
8958 | u32 frame; |
8959 | |
	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8961 | *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK; |
8962 | *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT) |
8963 | & REMOTE_DEVICE_REV_MASK; |
8964 | } |
8965 | |
8966 | int write_host_interface_version(struct hfi1_devdata *dd, u8 version) |
8967 | { |
8968 | u32 frame; |
8969 | u32 mask; |
8970 | |
8971 | mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT); |
	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8973 | /* Clear, then set field */ |
8974 | frame &= ~mask; |
8975 | frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT); |
8976 | return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, |
				frame);
8978 | } |
8979 | |
8980 | void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, |
8981 | u8 *ver_patch) |
8982 | { |
8983 | u32 frame; |
8984 | |
	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8986 | *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) & |
8987 | STS_FM_VERSION_MAJOR_MASK; |
8988 | *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) & |
8989 | STS_FM_VERSION_MINOR_MASK; |
8990 | |
	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8992 | *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) & |
8993 | STS_FM_VERSION_PATCH_MASK; |
8994 | } |
8995 | |
8996 | static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, |
8997 | u8 *continuous) |
8998 | { |
8999 | u32 frame; |
9000 | |
	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
9002 | *power_management = (frame >> POWER_MANAGEMENT_SHIFT) |
9003 | & POWER_MANAGEMENT_MASK; |
9004 | *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT) |
9005 | & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK; |
9006 | } |
9007 | |
9008 | static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, |
9009 | u8 *vcu, u16 *vl15buf, u8 *crc_sizes) |
9010 | { |
9011 | u32 frame; |
9012 | |
	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
9014 | *vau = (frame >> VAU_SHIFT) & VAU_MASK; |
9015 | *z = (frame >> Z_SHIFT) & Z_MASK; |
9016 | *vcu = (frame >> VCU_SHIFT) & VCU_MASK; |
9017 | *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK; |
9018 | *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK; |
9019 | } |
9020 | |
9021 | static void read_vc_remote_link_width(struct hfi1_devdata *dd, |
9022 | u8 *remote_tx_rate, |
9023 | u16 *link_widths) |
9024 | { |
9025 | u32 frame; |
9026 | |
9027 | read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, |
			 &frame);
9029 | *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) |
9030 | & REMOTE_TX_RATE_MASK; |
9031 | *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; |
9032 | } |
9033 | |
9034 | static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) |
9035 | { |
9036 | u32 frame; |
9037 | |
	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
9039 | *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK; |
9040 | } |
9041 | |
9042 | static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) |
9043 | { |
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9045 | } |
9046 | |
9047 | static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) |
9048 | { |
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9050 | } |
9051 | |
9052 | void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) |
9053 | { |
9054 | u32 frame; |
9055 | int ret; |
9056 | |
9057 | *link_quality = 0; |
9058 | if (dd->pport->host_link_state & HLS_UP) { |
9059 | ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, |
				       &frame);
9061 | if (ret == 0) |
9062 | *link_quality = (frame >> LINK_QUALITY_SHIFT) |
9063 | & LINK_QUALITY_MASK; |
9064 | } |
9065 | } |
9066 | |
9067 | static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) |
9068 | { |
9069 | u32 frame; |
9070 | |
	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9072 | *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK; |
9073 | } |
9074 | |
9075 | static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) |
9076 | { |
9077 | u32 frame; |
9078 | |
	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9080 | *ldr = (frame & 0xff); |
9081 | } |
9082 | |
9083 | static int read_tx_settings(struct hfi1_devdata *dd, |
9084 | u8 *enable_lane_tx, |
9085 | u8 *tx_polarity_inversion, |
9086 | u8 *rx_polarity_inversion, |
9087 | u8 *max_rate) |
9088 | { |
9089 | u32 frame; |
9090 | int ret; |
9091 | |
	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9093 | *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT) |
9094 | & ENABLE_LANE_TX_MASK; |
9095 | *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT) |
9096 | & TX_POLARITY_INVERSION_MASK; |
9097 | *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT) |
9098 | & RX_POLARITY_INVERSION_MASK; |
9099 | *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK; |
9100 | return ret; |
9101 | } |
9102 | |
9103 | static int write_tx_settings(struct hfi1_devdata *dd, |
9104 | u8 enable_lane_tx, |
9105 | u8 tx_polarity_inversion, |
9106 | u8 rx_polarity_inversion, |
9107 | u8 max_rate) |
9108 | { |
9109 | u32 frame; |
9110 | |
9111 | /* no need to mask, all variable sizes match field widths */ |
9112 | frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT |
9113 | | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT |
9114 | | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT |
9115 | | max_rate << MAX_RATE_SHIFT; |
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9117 | } |
9118 | |
9119 | /* |
9120 | * Read an idle LCB message. |
9121 | * |
9122 | * Returns 0 on success, -EINVAL on error |
9123 | */ |
9124 | static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) |
9125 | { |
9126 | int ret; |
9127 | |
	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9135 | /* return only the payload as we already know the type */ |
9136 | *data_out >>= IDLE_PAYLOAD_SHIFT; |
9137 | return 0; |
9138 | } |
9139 | |
9140 | /* |
9141 | * Read an idle SMA message. To be done in response to a notification from |
9142 | * the 8051. |
9143 | * |
9144 | * Returns 0 on success, -EINVAL on error |
9145 | */ |
9146 | static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) |
9147 | { |
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
9150 | } |
9151 | |
9152 | /* |
9153 | * Send an idle LCB message. |
9154 | * |
9155 | * Returns 0 on success, -EINVAL on error |
9156 | */ |
9157 | static int send_idle_message(struct hfi1_devdata *dd, u64 data) |
9158 | { |
9159 | int ret; |
9160 | |
	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9165 | data, ret); |
9166 | return -EINVAL; |
9167 | } |
9168 | return 0; |
9169 | } |
9170 | |
9171 | /* |
9172 | * Send an idle SMA message. |
9173 | * |
9174 | * Returns 0 on success, -EINVAL on error |
9175 | */ |
9176 | int send_idle_sma(struct hfi1_devdata *dd, u64 message) |
9177 | { |
9178 | u64 data; |
9179 | |
9180 | data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) | |
9181 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); |
9182 | return send_idle_message(dd, data); |
9183 | } |
9184 | |
9185 | /* |
9186 | * Initialize the LCB then do a quick link up. This may or may not be |
9187 | * in loopback. |
9188 | * |
9189 | * return 0 on success, -errno on error |
9190 | */ |
9191 | static int do_quick_linkup(struct hfi1_devdata *dd) |
9192 | { |
9193 | int ret; |
9194 | |
	lcb_shutdown(dd, 0);
9196 | |
9197 | if (loopback) { |
9198 | /* LCB_CFG_LOOPBACK.VAL = 2 */ |
9199 | /* LCB_CFG_LANE_WIDTH.VAL = 0 */ |
9200 | write_csr(dd, DC_LCB_CFG_LOOPBACK, |
9201 | IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); |
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9203 | } |
9204 | |
9205 | /* start the LCBs */ |
9206 | /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */ |
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9208 | |
	/* simulator-only loopback steps */
9210 | if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { |
9211 | /* LCB_CFG_RUN.EN = 1 */ |
9212 | write_csr(dd, DC_LCB_CFG_RUN, |
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9214 | |
		ret = wait_link_transfer_active(dd, 10);
9216 | if (ret) |
9217 | return ret; |
9218 | |
9219 | write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, |
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9221 | } |
9222 | |
9223 | if (!loopback) { |
9224 | /* |
9225 | * When doing quick linkup and not in loopback, both |
9226 | * sides must be done with LCB set-up before either |
9227 | * starts the quick linkup. Put a delay here so that |
9228 | * both sides can be started and have a chance to be |
9229 | * done with LCB set up before resuming. |
9230 | */ |
9231 | dd_dev_err(dd, |
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
9235 | } |
9236 | |
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9238 | set_8051_lcb_access(dd); |
9239 | |
9240 | /* |
9241 | * State "quick" LinkUp request sets the physical link state to |
9242 | * LinkUp without a verify capability sequence. |
9243 | * This state is in simulator v37 and later. |
9244 | */ |
9245 | ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); |
9246 | if (ret != HCMD_SUCCESS) { |
9247 | dd_dev_err(dd, |
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
9249 | __func__, ret); |
9250 | |
9251 | set_host_lcb_access(dd); |
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9253 | |
9254 | if (ret >= 0) |
9255 | ret = -EINVAL; |
9256 | return ret; |
9257 | } |
9258 | |
9259 | return 0; /* success */ |
9260 | } |
9261 | |
9262 | /* |
9263 | * Do all special steps to set up loopback. |
9264 | */ |
9265 | static int init_loopback(struct hfi1_devdata *dd) |
9266 | { |
	dd_dev_info(dd, "Entering loopback mode\n");
9268 | |
9269 | /* all loopbacks should disable self GUID check */ |
9270 | write_csr(dd, DC_DC8051_CFG_MODE, |
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9272 | |
9273 | /* |
9274 | * The simulator has only one loopback option - LCB. Switch |
9275 | * to that option, which includes quick link up. |
9276 | * |
9277 | * Accept all valid loopback values. |
9278 | */ |
9279 | if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && |
9280 | (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || |
9281 | loopback == LOOPBACK_CABLE)) { |
9282 | loopback = LOOPBACK_LCB; |
9283 | quick_linkup = 1; |
9284 | return 0; |
9285 | } |
9286 | |
9287 | /* |
9288 | * SerDes loopback init sequence is handled in set_local_link_attributes |
9289 | */ |
9290 | if (loopback == LOOPBACK_SERDES) |
9291 | return 0; |
9292 | |
9293 | /* LCB loopback - handled at poll time */ |
9294 | if (loopback == LOOPBACK_LCB) { |
9295 | quick_linkup = 1; /* LCB is always quick linkup */ |
9296 | |
9297 | /* not supported in emulation due to emulation RTL changes */ |
9298 | if (dd->icode == ICODE_FPGA_EMULATION) { |
9299 | dd_dev_err(dd, |
				   "LCB loopback not supported in emulation\n");
9301 | return -EINVAL; |
9302 | } |
9303 | return 0; |
9304 | } |
9305 | |
9306 | /* external cable loopback requires no extra steps */ |
9307 | if (loopback == LOOPBACK_CABLE) |
9308 | return 0; |
9309 | |
	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9311 | return -EINVAL; |
9312 | } |
9313 | |
9314 | /* |
9315 | * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits |
9316 | * used in the Verify Capability link width attribute. |
9317 | */ |
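/*
 * For example (illustrative): OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X
 * translates to 0b1001 in the Verify Capability encoding below.
 */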
9318 | static u16 opa_to_vc_link_widths(u16 opa_widths) |
9319 | { |
9320 | int i; |
9321 | u16 result = 0; |
9322 | |
9323 | static const struct link_bits { |
9324 | u16 from; |
9325 | u16 to; |
9326 | } opa_link_xlate[] = { |
9327 | { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, |
9328 | { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, |
9329 | { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, |
9330 | { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, |
9331 | }; |
9332 | |
9333 | for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) { |
9334 | if (opa_widths & opa_link_xlate[i].from) |
9335 | result |= opa_link_xlate[i].to; |
9336 | } |
9337 | return result; |
9338 | } |
9339 | |
9340 | /* |
9341 | * Set link attributes before moving to polling. |
9342 | */ |
9343 | static int set_local_link_attributes(struct hfi1_pportdata *ppd) |
9344 | { |
9345 | struct hfi1_devdata *dd = ppd->dd; |
9346 | u8 enable_lane_tx; |
9347 | u8 tx_polarity_inversion; |
9348 | u8 rx_polarity_inversion; |
9349 | int ret; |
9350 | u32 misc_bits = 0; |
9351 | /* reset our fabric serdes to clear any lingering problems */ |
9352 | fabric_serdes_reset(dd); |
9353 | |
9354 | /* set the local tx rate - need to read-modify-write */ |
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
9357 | if (ret) |
9358 | goto set_local_link_attributes_fail; |
9359 | |
9360 | if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { |
9361 | /* set the tx rate to the fastest enabled */ |
9362 | if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) |
9363 | ppd->local_tx_rate = 1; |
9364 | else |
9365 | ppd->local_tx_rate = 0; |
9366 | } else { |
9367 | /* set the tx rate to all enabled */ |
9368 | ppd->local_tx_rate = 0; |
9369 | if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) |
9370 | ppd->local_tx_rate |= 2; |
9371 | if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) |
9372 | ppd->local_tx_rate |= 1; |
9373 | } |
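	/*
	 * Illustrative: on a newer 8051 with both 12.5G and 25G enabled,
	 * local_tx_rate ends up as 0b11 (both rate bits set).
	 */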
9374 | |
9375 | enable_lane_tx = 0xF; /* enable all four lanes */ |
9376 | ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, |
				rx_polarity_inversion, ppd->local_tx_rate);
9378 | if (ret != HCMD_SUCCESS) |
9379 | goto set_local_link_attributes_fail; |
9380 | |
9381 | ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); |
9382 | if (ret != HCMD_SUCCESS) { |
9383 | dd_dev_err(dd, |
			   "Failed to set host interface version, return 0x%x\n",
9385 | ret); |
9386 | goto set_local_link_attributes_fail; |
9387 | } |
9388 | |
9389 | /* |
9390 | * DC supports continuous updates. |
9391 | */ |
9392 | ret = write_vc_local_phy(dd, |
				 0 /* no power management */,
				 1 /* continuous updates */);
9395 | if (ret != HCMD_SUCCESS) |
9396 | goto set_local_link_attributes_fail; |
9397 | |
9398 | /* z=1 in the next call: AU of 0 is not supported by the hardware */ |
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
9401 | if (ret != HCMD_SUCCESS) |
9402 | goto set_local_link_attributes_fail; |
9403 | |
9404 | /* |
9405 | * SerDes loopback init sequence requires |
9406 | * setting bit 0 of MISC_CONFIG_BITS |
9407 | */ |
9408 | if (loopback == LOOPBACK_SERDES) |
9409 | misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT; |
9410 | |
9411 | /* |
9412 | * An external device configuration request is used to reset the LCB |
9413 | * to retry to obtain operational lanes when the first attempt is |
	 * unsuccessful.
9415 | */ |
9416 | if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) |
9417 | misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT; |
9418 | |
	ret = write_vc_local_link_mode(dd, misc_bits, 0,
				       opa_to_vc_link_widths(
						ppd->link_width_enabled));
9422 | if (ret != HCMD_SUCCESS) |
9423 | goto set_local_link_attributes_fail; |
9424 | |
9425 | /* let peer know who we are */ |
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9427 | if (ret == HCMD_SUCCESS) |
9428 | return 0; |
9429 | |
9430 | set_local_link_attributes_fail: |
9431 | dd_dev_err(dd, |
		   "Failed to set local link attributes, return 0x%x\n",
9433 | ret); |
9434 | return ret; |
9435 | } |
9436 | |
9437 | /* |
9438 | * Call this to start the link. |
9439 | * Do not do anything if the link is disabled. |
9440 | * Returns 0 if link is disabled, moved to polling, or the driver is not ready. |
9441 | */ |
9442 | int start_link(struct hfi1_pportdata *ppd) |
9443 | { |
9444 | /* |
9445 | * Tune the SerDes to a ballpark setting for optimal signal and bit |
9446 | * error rate. Needs to be done before starting the link. |
9447 | */ |
9448 | tune_serdes(ppd); |
9449 | |
9450 | if (!ppd->driver_link_ready) { |
9451 | dd_dev_info(ppd->dd, |
			    "%s: stopping link start because driver is not ready\n",
9453 | __func__); |
9454 | return 0; |
9455 | } |
9456 | |
9457 | /* |
9458 | * FULL_MGMT_P_KEY is cleared from the pkey table, so that the |
9459 | * pkey table can be configured properly if the HFI unit is connected |
9460 | * to switch port with MgmtAllowed=NO |
9461 | */ |
9462 | clear_full_mgmt_pkey(ppd); |
9463 | |
9464 | return set_link_state(ppd, HLS_DN_POLL); |
9465 | } |
9466 | |
9467 | static void wait_for_qsfp_init(struct hfi1_pportdata *ppd) |
9468 | { |
9469 | struct hfi1_devdata *dd = ppd->dd; |
9470 | u64 mask; |
9471 | unsigned long timeout; |
9472 | |
9473 | /* |
9474 | * Some QSFP cables have a quirk that asserts the IntN line as a side |
9475 | * effect of power up on plug-in. We ignore this false positive |
9476 | * interrupt until the module has finished powering up by waiting for |
9477 | * a minimum timeout of the module inrush initialization time of |
9478 | * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the |
9479 | * module have stabilized. |
9480 | */ |
	msleep(500);
9482 | |
9483 | /* |
9484 | * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) |
9485 | */ |
	timeout = jiffies + msecs_to_jiffies(2000);
9487 | while (1) { |
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9490 | if (!(mask & QSFP_HFI0_INT_N)) |
9491 | break; |
9492 | if (time_after(jiffies, timeout)) { |
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9494 | __func__); |
9495 | break; |
9496 | } |
9497 | udelay(2); |
9498 | } |
9499 | } |
9500 | |
9501 | static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) |
9502 | { |
9503 | struct hfi1_devdata *dd = ppd->dd; |
9504 | u64 mask; |
9505 | |
	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9507 | if (enable) { |
9508 | /* |
9509 | * Clear the status register to avoid an immediate interrupt |
9510 | * when we re-enable the IntN pin |
9511 | */ |
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9513 | QSFP_HFI0_INT_N); |
9514 | mask |= (u64)QSFP_HFI0_INT_N; |
9515 | } else { |
9516 | mask &= ~(u64)QSFP_HFI0_INT_N; |
9517 | } |
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9519 | } |
9520 | |
9521 | int reset_qsfp(struct hfi1_pportdata *ppd) |
9522 | { |
9523 | struct hfi1_devdata *dd = ppd->dd; |
9524 | u64 mask, qsfp_mask; |
9525 | |
9526 | /* Disable INT_N from triggering QSFP interrupts */ |
	set_qsfp_int_n(ppd, 0);
9528 | |
9529 | /* Reset the QSFP */ |
9530 | mask = (u64)QSFP_HFI0_RESET_N; |
9531 | |
9532 | qsfp_mask = read_csr(dd, |
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9534 | qsfp_mask &= ~mask; |
9535 | write_csr(dd, |
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9537 | |
9538 | udelay(10); |
9539 | |
9540 | qsfp_mask |= mask; |
9541 | write_csr(dd, |
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9543 | |
9544 | wait_for_qsfp_init(ppd); |
9545 | |
9546 | /* |
9547 | * Allow INT_N to trigger the QSFP interrupt to watch |
9548 | * for alarms and warnings |
9549 | */ |
	set_qsfp_int_n(ppd, 1);
9551 | |
9552 | /* |
9553 | * After the reset, AOC transmitters are enabled by default. They need |
9554 | * to be turned off to complete the QSFP setup before they can be |
9555 | * enabled again. |
9556 | */ |
	return set_qsfp_tx(ppd, 0);
9558 | } |
9559 | |
9560 | static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, |
9561 | u8 *qsfp_interrupt_status) |
9562 | { |
9563 | struct hfi1_devdata *dd = ppd->dd; |
9564 | |
	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
			   __func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
			   __func__);

	/*
	 * The remaining alarms/warnings don't matter if the link is down.
	 */
	if (ppd->host_link_state & HLS_DOWN)
		return 0;

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
			   __func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
			   __func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
			   __func__);
9652 | |
9653 | /* Bytes 9-10 and 11-12 are reserved */ |
9654 | /* Bytes 13-15 are vendor specific */ |
9655 | |
9656 | return 0; |
9657 | } |
9658 | |
/* This routine is only scheduled if the QSFP module-present signal is asserted */
9660 | void qsfp_event(struct work_struct *work) |
9661 | { |
9662 | struct qsfp_data *qd; |
9663 | struct hfi1_pportdata *ppd; |
9664 | struct hfi1_devdata *dd; |
9665 | |
9666 | qd = container_of(work, struct qsfp_data, qsfp_work); |
9667 | ppd = qd->ppd; |
9668 | dd = ppd->dd; |
9669 | |
9670 | /* Sanity check */ |
9671 | if (!qsfp_mod_present(ppd)) |
9672 | return; |
9673 | |
9674 | if (ppd->host_link_state == HLS_DN_DISABLE) { |
9675 | dd_dev_info(ppd->dd, |
			    "%s: stopping link start because link is disabled\n",
9677 | __func__); |
9678 | return; |
9679 | } |
9680 | |
9681 | /* |
9682 | * Turn DC back on after cable has been re-inserted. Up until |
9683 | * now, the DC has been in reset to save power. |
9684 | */ |
9685 | dc_start(dd); |
9686 | |
9687 | if (qd->cache_refresh_required) { |
		set_qsfp_int_n(ppd, 0);
9689 | |
9690 | wait_for_qsfp_init(ppd); |
9691 | |
9692 | /* |
9693 | * Allow INT_N to trigger the QSFP interrupt to watch |
9694 | * for alarms and warnings |
9695 | */ |
		set_qsfp_int_n(ppd, 1);
9697 | |
9698 | start_link(ppd); |
9699 | } |
9700 | |
9701 | if (qd->check_interrupt_flags) { |
9702 | u8 qsfp_interrupt_status[16] = {0,}; |
9703 | |
		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
9706 | dd_dev_info(dd, |
				    "%s: Failed to read status of QSFP module\n",
9708 | __func__); |
9709 | } else { |
9710 | unsigned long flags; |
9711 | |
9712 | handle_qsfp_error_conditions( |
9713 | ppd, qsfp_interrupt_status); |
9714 | spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); |
9715 | ppd->qsfp_info.check_interrupt_flags = 0; |
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9717 | flags); |
9718 | } |
9719 | } |
9720 | } |
9721 | |
9722 | void init_qsfp_int(struct hfi1_devdata *dd) |
9723 | { |
9724 | struct hfi1_pportdata *ppd = dd->pport; |
9725 | u64 qsfp_mask; |
9726 | |
9727 | qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); |
9728 | /* Clear current status to avoid spurious interrupts */ |
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);
9733 | |
	set_qsfp_int_n(ppd, 0);
9735 | |
9736 | /* Handle active low nature of INT_N and MODPRST_N pins */ |
9737 | if (qsfp_mod_present(ppd)) |
9738 | qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N; |
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
9742 | |
9743 | /* Enable the appropriate QSFP IRQ source */ |
9744 | if (!dd->hfi1_id) |
		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9746 | else |
		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9748 | } |
9749 | |
9750 | /* |
 * Do a one-time initialization of the LCB block.
9752 | */ |
9753 | static void init_lcb(struct hfi1_devdata *dd) |
9754 | { |
9755 | /* simulator does not correctly handle LCB cclk loopback, skip */ |
9756 | if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) |
9757 | return; |
9758 | |
9759 | /* the DC has been reset earlier in the driver load */ |
9760 | |
9761 | /* set LCB for cclk loopback on the port */ |
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9769 | } |
9770 | |
9771 | /* |
9772 | * Perform a test read on the QSFP. Return 0 on success, -ERRNO |
9773 | * on error. |
9774 | */ |
9775 | static int test_qsfp_read(struct hfi1_pportdata *ppd) |
9776 | { |
9777 | int ret; |
9778 | u8 status; |
9779 | |
9780 | /* |
	 * Report success if this is not a QSFP or if it is a QSFP but the
	 * cable is not present
9783 | */ |
9784 | if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) |
9785 | return 0; |
9786 | |
9787 | /* read byte 2, the status byte */ |
	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9789 | if (ret < 0) |
9790 | return ret; |
9791 | if (ret != 1) |
9792 | return -EIO; |
9793 | |
9794 | return 0; /* success */ |
9795 | } |
9796 | |
9797 | /* |
9798 | * Values for QSFP retry. |
9799 | * |
9800 | * Give up after 10s (20 x 500ms). The overall timeout was empirically |
9801 | * arrived at from experience on a large cluster. |
9802 | */ |
9803 | #define MAX_QSFP_RETRIES 20 |
9804 | #define QSFP_RETRY_WAIT 500 /* msec */ |
9805 | |
9806 | /* |
9807 | * Try a QSFP read. If it fails, schedule a retry for later. |
9808 | * Called on first link activation after driver load. |
9809 | */ |
9810 | static void try_start_link(struct hfi1_pportdata *ppd) |
9811 | { |
9812 | if (test_qsfp_read(ppd)) { |
9813 | /* read failed */ |
9814 | if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { |
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9816 | return; |
9817 | } |
9818 | dd_dev_info(ppd->dd, |
			    "QSFP not responding, waiting and retrying %d\n",
9820 | (int)ppd->qsfp_retry_count); |
9821 | ppd->qsfp_retry_count++; |
		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
9824 | return; |
9825 | } |
9826 | ppd->qsfp_retry_count = 0; |
9827 | |
9828 | start_link(ppd); |
9829 | } |
9830 | |
9831 | /* |
9832 | * Workqueue function to start the link after a delay. |
9833 | */ |
9834 | void handle_start_link(struct work_struct *work) |
9835 | { |
9836 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
9837 | start_link_work.work); |
9838 | try_start_link(ppd); |
9839 | } |
9840 | |
9841 | int bringup_serdes(struct hfi1_pportdata *ppd) |
9842 | { |
9843 | struct hfi1_devdata *dd = ppd->dd; |
9844 | u64 guid; |
9845 | int ret; |
9846 | |
9847 | if (HFI1_CAP_IS_KSET(EXTENDED_PSN)) |
9848 | add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK); |
9849 | |
9850 | guid = ppd->guids[HFI1_PORT_GUID_INDEX]; |
9851 | if (!guid) { |
9852 | if (dd->base_guid) |
9853 | guid = dd->base_guid + ppd->port - 1; |
9854 | ppd->guids[HFI1_PORT_GUID_INDEX] = guid; |
9855 | } |
9856 | |
9857 | /* Set linkinit_reason on power up per OPA spec */ |
9858 | ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; |
9859 | |
9860 | /* one-time init of the LCB */ |
9861 | init_lcb(dd); |
9862 | |
9863 | if (loopback) { |
9864 | ret = init_loopback(dd); |
9865 | if (ret < 0) |
9866 | return ret; |
9867 | } |
9868 | |
9869 | get_port_type(ppd); |
9870 | if (ppd->port_type == PORT_TYPE_QSFP) { |
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
9874 | } |
9875 | |
9876 | try_start_link(ppd); |
9877 | return 0; |
9878 | } |
9879 | |
9880 | void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) |
9881 | { |
9882 | struct hfi1_devdata *dd = ppd->dd; |
9883 | |
9884 | /* |
	 * Shut down the link and keep it down. First clear the driver's
	 * willingness to bring the link up (driver_link_ready). Then make
	 * sure the link is not automatically restarted (link_enabled).
	 * Cancel any pending restart. Finally, go offline.
9890 | */ |
9891 | ppd->driver_link_ready = 0; |
9892 | ppd->link_enabled = 0; |
9893 | |
9894 | ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ |
	flush_delayed_work(&ppd->start_link_work);
	cancel_delayed_work_sync(&ppd->start_link_work);
9897 | |
9898 | ppd->offline_disabled_reason = |
9899 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT); |
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9901 | OPA_LINKDOWN_REASON_REBOOT); |
9902 | set_link_state(ppd, HLS_DN_OFFLINE); |
9903 | |
9904 | /* disable the port */ |
9905 | clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
	cancel_work_sync(&ppd->freeze_work);
9907 | } |
9908 | |
9909 | static inline int init_cpu_counters(struct hfi1_devdata *dd) |
9910 | { |
9911 | struct hfi1_pportdata *ppd; |
9912 | int i; |
9913 | |
9914 | ppd = (struct hfi1_pportdata *)(dd + 1); |
9915 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
9916 | ppd->ibport_data.rvp.rc_acks = NULL; |
9917 | ppd->ibport_data.rvp.rc_qacks = NULL; |
9918 | ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); |
9919 | ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); |
9920 | ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); |
9921 | if (!ppd->ibport_data.rvp.rc_acks || |
9922 | !ppd->ibport_data.rvp.rc_delayed_comp || |
9923 | !ppd->ibport_data.rvp.rc_qacks) |
9924 | return -ENOMEM; |
9925 | } |
9926 | |
9927 | return 0; |
9928 | } |
9929 | |
9930 | /* |
9931 | * index is the index into the receive array |
9932 | */ |
9933 | void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, |
9934 | u32 type, unsigned long pa, u16 order) |
9935 | { |
9936 | u64 reg; |
9937 | |
9938 | if (!(dd->flags & HFI1_PRESENT)) |
9939 | goto done; |
9940 | |
9941 | if (type == PT_INVALID || type == PT_INVALID_FLUSH) { |
9942 | pa = 0; |
9943 | order = 0; |
9944 | } else if (type > PT_INVALID) { |
9945 | dd_dev_err(dd, |
			   "unexpected receive array type %u for index %u, not handled\n",
9947 | type, index); |
9948 | goto done; |
9949 | } |
9950 | trace_hfi1_put_tid(dd, index, type, pa, order); |
9951 | |
9952 | #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */ |
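	/*
	 * Illustrative packing (hypothetical values): pa = 0x12345000 and
	 * order = 1 yield WRITE_ENABLE | (1 << BUF_SIZE_SHIFT) |
	 * (0x12345 << ADDR_SHIFT), since pa >> 12 = 0x12345.
	 */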
9953 | reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK |
9954 | | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT |
9955 | | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK) |
9956 | << RCV_ARRAY_RT_ADDR_SHIFT; |
	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
	writeq(reg, dd->rcvarray_wc + (index * 8));
9959 | |
9960 | if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3) |
9961 | /* |
9962 | * Eager entries are written and flushed |
9963 | * |
9964 | * Expected entries are flushed every 4 writes |
9965 | */ |
9966 | flush_wc(); |
9967 | done: |
9968 | return; |
9969 | } |
9970 | |
9971 | void hfi1_clear_tids(struct hfi1_ctxtdata *rcd) |
9972 | { |
9973 | struct hfi1_devdata *dd = rcd->dd; |
9974 | u32 i; |
9975 | |
9976 | /* this could be optimized */ |
9977 | for (i = rcd->eager_base; i < rcd->eager_base + |
9978 | rcd->egrbufs.alloced; i++) |
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9980 | |
9981 | for (i = rcd->expected_base; |
9982 | i < rcd->expected_base + rcd->expected_count; i++) |
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9984 | } |
9985 | |
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};
10010 | |
10011 | static const char *ib_cfg_name(int which) |
10012 | { |
10013 | if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings)) |
		return "invalid";
10015 | return ib_cfg_name_strings[which]; |
10016 | } |
10017 | |
10018 | int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) |
10019 | { |
10020 | struct hfi1_devdata *dd = ppd->dd; |
10021 | int val = 0; |
10022 | |
10023 | switch (which) { |
10024 | case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ |
10025 | val = ppd->link_width_enabled; |
10026 | break; |
10027 | case HFI1_IB_CFG_LWID: /* currently active Link-width */ |
10028 | val = ppd->link_width_active; |
10029 | break; |
10030 | case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ |
10031 | val = ppd->link_speed_enabled; |
10032 | break; |
10033 | case HFI1_IB_CFG_SPD: /* current Link speed */ |
10034 | val = ppd->link_speed_active; |
10035 | break; |
10036 | |
10037 | case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ |
10038 | case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ |
10039 | case HFI1_IB_CFG_LINKLATENCY: |
10040 | goto unimplemented; |
10041 | |
10042 | case HFI1_IB_CFG_OP_VLS: |
10043 | val = ppd->actual_vls_operational; |
10044 | break; |
10045 | case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ |
10046 | val = VL_ARB_HIGH_PRIO_TABLE_SIZE; |
10047 | break; |
10048 | case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */ |
10049 | val = VL_ARB_LOW_PRIO_TABLE_SIZE; |
10050 | break; |
10051 | case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ |
10052 | val = ppd->overrun_threshold; |
10053 | break; |
10054 | case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ |
10055 | val = ppd->phy_error_threshold; |
10056 | break; |
10057 | case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ |
10058 | val = HLS_DEFAULT; |
10059 | break; |
10060 | |
10061 | case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */ |
10062 | case HFI1_IB_CFG_PMA_TICKS: |
10063 | default: |
10064 | unimplemented: |
10065 | if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) |
10066 | dd_dev_info( |
10067 | dd, |
				"%s: which %s: not implemented\n",
10069 | __func__, |
10070 | ib_cfg_name(which)); |
10071 | break; |
10072 | } |
10073 | |
10074 | return val; |
10075 | } |
10076 | |
10077 | /* |
10078 | * The largest MAD packet size. |
10079 | */ |
10080 | #define MAX_MAD_PACKET 2048 |
10081 | |
10082 | /* |
10083 | * Return the maximum header bytes that can go on the _wire_ |
10084 | * for this device. This count includes the ICRC which is |
10085 | * not part of the packet held in memory but it is appended |
10086 | * by the HW. |
10087 | * This is dependent on the device's receive header entry size. |
10088 | * HFI allows this to be set per-receive context, but the |
10089 | * driver presently enforces a global value. |
10090 | */ |
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10092 | { |
10093 | /* |
10094 | * The maximum non-payload (MTU) bytes in LRH.PktLen are |
10095 | * the Receive Header Entry Size minus the PBC (or RHF) size |
10096 | * plus one DW for the ICRC appended by HW. |
10097 | * |
10098 | * dd->rcd[0].rcvhdrqentsize is in DW. |
	 * We use rcd[0] as all contexts will have the same value. Also,
10100 | * the first kernel context would have been allocated by now so |
10101 | * we are guaranteed a valid value. |
10102 | */ |
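	/*
	 * Worked example (hypothetical entry size): a 32 DW receive header
	 * entry gives (32 - 2 + 1) << 2 = 124 header bytes on the wire.
	 */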
	return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10104 | } |
10105 | |
10106 | /* |
10107 | * Set Send Length |
10108 | * @ppd: per port data |
10109 | * |
10110 | * Set the MTU by limiting how many DWs may be sent. The SendLenCheck* |
10111 | * registers compare against LRH.PktLen, so use the max bytes included |
10112 | * in the LRH. |
10113 | * |
10114 | * This routine changes all VL values except VL15, which it maintains at |
10115 | * the same value. |
10116 | */ |
10117 | static void set_send_length(struct hfi1_pportdata *ppd) |
10118 | { |
10119 | struct hfi1_devdata *dd = ppd->dd; |
10120 | u32 max_hb = lrh_max_header_bytes(dd), dcmtu; |
10121 | u32 maxvlmtu = dd->vld[15].mtu; |
10122 | u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) |
10123 | & SEND_LEN_CHECK1_LEN_VL15_MASK) << |
10124 | SEND_LEN_CHECK1_LEN_VL15_SHIFT; |
10125 | int i, j; |
10126 | u32 thres; |
10127 | |
10128 | for (i = 0; i < ppd->vls_supported; i++) { |
10129 | if (dd->vld[i].mtu > maxvlmtu) |
10130 | maxvlmtu = dd->vld[i].mtu; |
10131 | if (i <= 3) |
10132 | len1 |= (((dd->vld[i].mtu + max_hb) >> 2) |
10133 | & SEND_LEN_CHECK0_LEN_VL0_MASK) << |
10134 | ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT); |
10135 | else |
10136 | len2 |= (((dd->vld[i].mtu + max_hb) >> 2) |
10137 | & SEND_LEN_CHECK1_LEN_VL4_MASK) << |
10138 | ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT); |
10139 | } |
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);
10142 | /* adjust kernel credit return thresholds based on new MTUs */ |
10143 | /* all kernel receive contexts have the same hdrqentsize */ |
10144 | for (i = 0; i < ppd->vls_supported; i++) { |
10145 | thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), |
10146 | sc_mtu_to_threshold(dd->vld[i].sc, |
10147 | dd->vld[i].mtu, |
10148 | get_hdrqentsize(dd->rcd[0]))); |
10149 | for (j = 0; j < INIT_SC_PER_VL; j++) |
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					thres);
10153 | } |
10154 | thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), |
10155 | sc_mtu_to_threshold(dd->vld[15].sc, |
10156 | dd->vld[15].mtu, |
10157 | dd->rcd[0]->rcvhdrqentsize)); |
	sc_set_cr_threshold(dd->vld[15].sc, thres);
10159 | |
10160 | /* Adjust maximum MTU for the port in DC */ |
10161 | dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 : |
10162 | (ilog2(maxvlmtu >> 8) + 1); |
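	/* e.g. (illustrative): an 8192-byte MTU encodes as ilog2(32) + 1 = 6 */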
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10164 | len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK; |
10165 | len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) << |
10166 | DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT; |
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10168 | } |
10169 | |
10170 | static void set_lidlmc(struct hfi1_pportdata *ppd) |
10171 | { |
10172 | int i; |
10173 | u64 sreg = 0; |
10174 | struct hfi1_devdata *dd = ppd->dd; |
10175 | u32 mask = ~((1U << ppd->lmc) - 1); |
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10177 | u32 lid; |
10178 | |
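	/*
	 * Illustrative: with an LMC of 2, mask is 0xfffffffc, so the low
	 * two LID bits are wildcarded in the DLID/SLID checks below.
	 */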
10179 | /* |
	 * Program 0 into the CSR if the port LID is extended. This prevents
	 * 9B packets from being sent out for large LIDs.
10182 | */ |
10183 | lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid; |
10184 | c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK |
10185 | | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); |
10186 | c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) |
10187 | << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) | |
10188 | ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK) |
10189 | << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT); |
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10191 | |
10192 | /* |
10193 | * Iterate over all the send contexts and set their SLID check |
10194 | */ |
10195 | sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) << |
10196 | SEND_CTXT_CHECK_SLID_MASK_SHIFT) | |
10197 | (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) << |
10198 | SEND_CTXT_CHECK_SLID_VALUE_SHIFT); |
10199 | |
10200 | for (i = 0; i < chip_send_contexts(dd); i++) { |
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10202 | i, (u32)sreg); |
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10204 | } |
10205 | |
10206 | /* Now we have to do the same thing for the sdma engines */ |
10207 | sdma_update_lmc(dd, mask, lid); |
10208 | } |
10209 | |
10210 | static const char *state_completed_string(u32 completed) |
10211 | { |
10212 | static const char * const state_completed[] = { |
		"EstablishComm",
		"OptimizeEQ",
10215 | "VerifyCap" |
10216 | }; |
10217 | |
10218 | if (completed < ARRAY_SIZE(state_completed)) |
10219 | return state_completed[completed]; |
10220 | |
	return "unknown";
10222 | } |
10223 | |
static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
	"Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
	"State timeout occurred before link partner completed the state";
static const char * const state_complete_reasons[] = {
	[0x00] = "Reason unknown",
	[0x01] = "Link was halted by driver, refer to LinkDownReason",
	[0x02] = "Link partner reported failure",
	[0x10] = "Unable to achieve frame sync on any lane",
	[0x11] =
	  "Unable to find a common bit rate with the link partner",
	[0x12] =
	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
	[0x13] =
	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
	[0x14] = no_state_complete,
	[0x15] =
	  "State timeout occurred before link partner identified equalization presets",
	[0x16] =
	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
	[0x17] = tx_out_of_policy,
	[0x20] = all_lanes_dead_timeout_expired,
	[0x21] =
	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
	[0x22] = no_state_complete,
	[0x23] =
	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
	[0x24] = tx_out_of_policy,
	[0x30] = all_lanes_dead_timeout_expired,
	[0x31] =
	  "State timeout occurred waiting for host to process received frames",
	[0x32] = no_state_complete,
	[0x33] =
	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
	[0x34] = tx_out_of_policy,
	[0x35] = "Negotiated link width is mutually exclusive",
	[0x36] =
	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
	[0x37] = "Unable to resolve secure data exchange",
};
10266 | |
10267 | static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd, |
10268 | u32 code) |
10269 | { |
10270 | const char *str = NULL; |
10271 | |
10272 | if (code < ARRAY_SIZE(state_complete_reasons)) |
10273 | str = state_complete_reasons[code]; |
10274 | |
10275 | if (str) |
10276 | return str; |
	return "Reserved";
10278 | } |
10279 | |
10280 | /* describe the given last state complete frame */ |
10281 | static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame, |
10282 | const char *prefix) |
10283 | { |
10284 | struct hfi1_devdata *dd = ppd->dd; |
10285 | u32 success; |
10286 | u32 state; |
10287 | u32 reason; |
10288 | u32 lanes; |
10289 | |
10290 | /* |
10291 | * Decode frame: |
10292 | * [ 0: 0] - success |
10293 | * [ 3: 1] - state |
10294 | * [ 7: 4] - next state timeout |
10295 | * [15: 8] - reason code |
10296 | * [31:16] - lanes |
10297 | */ |
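	/*
	 * Example (illustrative): frame 0x000f1405 decodes as success = 1,
	 * state = 2 (VerifyCap), reason = 0x14 (no_state_complete),
	 * passing lanes = 0x000f.
	 */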
10298 | success = frame & 0x1; |
10299 | state = (frame >> 1) & 0x7; |
10300 | reason = (frame >> 8) & 0xff; |
10301 | lanes = (frame >> 16) & 0xffff; |
10302 | |
	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, " last reported state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, " state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, " fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10312 | } |
10313 | |
10314 | /* |
10315 | * Read the last state complete frames and explain them. This routine |
10316 | * expects to be called if the link went down during link negotiation |
10317 | * and initialization (LNI). That is, anywhere between polling and link up. |
10318 | */ |
10319 | static void check_lni_states(struct hfi1_pportdata *ppd) |
10320 | { |
10321 | u32 last_local_state; |
10322 | u32 last_remote_state; |
10323 | |
	read_last_local_state(ppd->dd, &last_local_state);
	read_last_remote_state(ppd->dd, &last_remote_state);
10326 | |
10327 | /* |
10328 | * Don't report anything if there is nothing to report. A value of |
10329 | * 0 means the link was taken down while polling and there was no |
10330 | * training in-process. |
10331 | */ |
10332 | if (last_local_state == 0 && last_remote_state == 0) |
10333 | return; |
10334 | |
10335 | decode_state_complete(ppd, last_local_state, "transmitted");
10336 | decode_state_complete(ppd, last_remote_state, "received");
10337 | } |
10338 | |
10339 | /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */ |
10340 | static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) |
10341 | { |
10342 | u64 reg; |
10343 | unsigned long timeout; |
10344 | |
10345 | /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ |
10346 | timeout = jiffies + msecs_to_jiffies(wait_ms);
10347 | while (1) { |
10348 | reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); |
10349 | if (reg) |
10350 | break; |
10351 | if (time_after(jiffies, timeout)) { |
10352 | dd_dev_err(dd, |
10353 | "timeout waiting for LINK_TRANSFER_ACTIVE\n" ); |
10354 | return -ETIMEDOUT; |
10355 | } |
10356 | udelay(2); |
10357 | } |
10358 | return 0; |
10359 | } |
10360 | |
10361 | /* called when the logical link state is not down as it should be */ |
10362 | static void force_logical_link_state_down(struct hfi1_pportdata *ppd) |
10363 | { |
10364 | struct hfi1_devdata *dd = ppd->dd; |
10365 | |
10366 | /* |
10367 | * Bring link up in LCB loopback |
10368 | */ |
10369 | write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10370 | write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10371 | DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10372 |
10373 | write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10374 | write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10375 | write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10376 | write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10377 |
10378 | write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10379 | (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10380 | udelay(3);
10381 | write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10382 | write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10383 |
10384 | wait_link_transfer_active(dd, 100);
10385 |
10386 | /*
10387 | * Bring the link down again.
10388 | */
10389 | write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10390 | write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10391 | write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10392 |
10393 | dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10394 | } |
10395 | |
10396 | /* |
10397 | * Helper for set_link_state(). Do not call except from that routine. |
10398 | * Expects ppd->hls_mutex to be held. |
10399 | * |
10400 | * @rem_reason value to be sent to the neighbor |
10401 | * |
10402 | * LinkDownReasons only set if transition succeeds. |
10403 | */ |
10404 | static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) |
10405 | { |
10406 | struct hfi1_devdata *dd = ppd->dd; |
10407 | u32 previous_state; |
10408 | int offline_state_ret; |
10409 | int ret; |
10410 | |
10411 | update_lcb_cache(dd); |
10412 | |
10413 | previous_state = ppd->host_link_state; |
10414 | ppd->host_link_state = HLS_GOING_OFFLINE; |
10415 | |
10416 | /* start offline transition */ |
10417 | ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10418 |
10419 | if (ret != HCMD_SUCCESS) {
10420 | dd_dev_err(dd,
10421 | "Failed to transition to Offline link state, return %d\n",
10422 | ret);
10423 | return -EINVAL; |
10424 | } |
10425 | if (ppd->offline_disabled_reason == |
10426 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) |
10427 | ppd->offline_disabled_reason = |
10428 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); |
10429 | |
10430 | offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10431 | if (offline_state_ret < 0) |
10432 | return offline_state_ret; |
10433 | |
10434 | /* Disabling AOC transmitters */ |
10435 | if (ppd->port_type == PORT_TYPE_QSFP && |
10436 | ppd->qsfp_info.limiting_active && |
10437 | qsfp_mod_present(ppd)) { |
10438 | int ret; |
10439 | |
10440 | ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10441 | if (ret == 0) {
10442 | set_qsfp_tx(ppd, 0);
10443 | release_chip_resource(dd, qsfp_resource(dd));
10444 | } else {
10445 | /* not fatal, but should warn */
10446 | dd_dev_err(dd,
10447 | "Unable to acquire lock to turn off QSFP TX\n");
10448 | } |
10449 | } |
10450 | |
10451 | /* |
10452 | * Wait for the offline.Quiet transition if it hasn't happened yet. It |
10453 | * can take a while for the link to go down. |
10454 | */ |
10455 | if (offline_state_ret != PLS_OFFLINE_QUIET) { |
10456 | ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10457 | if (ret < 0) |
10458 | return ret; |
10459 | } |
10460 | |
10461 | /* |
10462 | * Now in charge of LCB - must be after the physical state is |
10463 | * offline.quiet and before host_link_state is changed. |
10464 | */ |
10465 | set_host_lcb_access(dd); |
10466 | write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10467 | |
10468 | /* make sure the logical state is also down */ |
10469 | ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10470 | if (ret) |
10471 | force_logical_link_state_down(ppd); |
10472 | |
10473 | ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ |
10474 | update_statusp(ppd, IB_PORT_DOWN);
10475 | |
10476 | /* |
10477 | * The LNI has a mandatory wait time after the physical state |
10478 | * moves to Offline.Quiet. The wait time may be different |
10479 | * depending on how the link went down. The 8051 firmware |
10480 | * will observe the needed wait time and only move to ready |
10481 | * when that is completed. The largest of the quiet timeouts |
10482 | * is 6s, so wait that long and then at least 0.5s more for |
10483 | * other transitions, and another 0.5s for a buffer. |
10484 | */ |
10485 | ret = wait_fm_ready(dd, 7000);
10486 | if (ret) {
10487 | dd_dev_err(dd,
10488 | "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10489 | /* state is really offline, so make it so */ |
10490 | ppd->host_link_state = HLS_DN_OFFLINE; |
10491 | return ret; |
10492 | } |
10493 | |
10494 | /* |
10495 | * The state is now offline and the 8051 is ready to accept host |
10496 | * requests. |
10497 | * - change our state |
10498 | * - notify others if we were previously in a linkup state |
10499 | */ |
10500 | ppd->host_link_state = HLS_DN_OFFLINE; |
10501 | if (previous_state & HLS_UP) { |
10502 | /* went down while link was up */ |
10503 | handle_linkup_change(dd, 0);
10504 | } else if (previous_state |
10505 | & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { |
10506 | /* went down while attempting link up */ |
10507 | check_lni_states(ppd); |
10508 | |
10509 | /* The QSFP doesn't need to be reset on LNI failure */ |
10510 | ppd->qsfp_info.reset_needed = 0; |
10511 | } |
10512 | |
10513 | /* the active link width (downgrade) is 0 on link down */ |
10514 | ppd->link_width_active = 0; |
10515 | ppd->link_width_downgrade_tx_active = 0; |
10516 | ppd->link_width_downgrade_rx_active = 0; |
10517 | ppd->current_egress_rate = 0; |
10518 | return 0; |
10519 | } |
10520 | |
10521 | /* return the link state name */ |
10522 | static const char *link_state_name(u32 state) |
10523 | { |
10524 | const char *name; |
10525 | int n = ilog2(state); |
10526 | static const char * const names[] = { |
10527 | [__HLS_UP_INIT_BP] = "INIT" , |
10528 | [__HLS_UP_ARMED_BP] = "ARMED" , |
10529 | [__HLS_UP_ACTIVE_BP] = "ACTIVE" , |
10530 | [__HLS_DN_DOWNDEF_BP] = "DOWNDEF" , |
10531 | [__HLS_DN_POLL_BP] = "POLL" , |
10532 | [__HLS_DN_DISABLE_BP] = "DISABLE" , |
10533 | [__HLS_DN_OFFLINE_BP] = "OFFLINE" , |
10534 | [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP" , |
10535 | [__HLS_GOING_UP_BP] = "GOING_UP" , |
10536 | [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE" , |
10537 | [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN" |
10538 | }; |
10539 | |
10540 | name = n < ARRAY_SIZE(names) ? names[n] : NULL; |
10541 | return name ? name : "unknown" ; |
10542 | } |
10543 | |
10544 | /* return the link state reason name */ |
10545 | static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state) |
10546 | { |
10547 | if (state == HLS_UP_INIT) { |
10548 | switch (ppd->linkinit_reason) { |
10549 | case OPA_LINKINIT_REASON_LINKUP:
10550 | return "(LINKUP)";
10551 | case OPA_LINKINIT_REASON_FLAPPING:
10552 | return "(FLAPPING)";
10553 | case OPA_LINKINIT_OUTSIDE_POLICY:
10554 | return "(OUTSIDE_POLICY)";
10555 | case OPA_LINKINIT_QUARANTINED:
10556 | return "(QUARANTINED)";
10557 | case OPA_LINKINIT_INSUFIC_CAPABILITY:
10558 | return "(INSUFIC_CAPABILITY)";
10559 | default: |
10560 | break; |
10561 | } |
10562 | } |
10563 | return "" ; |
10564 | } |
10565 | |
10566 | /* |
10567 | * driver_pstate - convert the driver's notion of a port's |
10568 | * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*). |
10569 | * Return -1 (converted to a u32) to indicate error. |
10570 | */ |
10571 | u32 driver_pstate(struct hfi1_pportdata *ppd) |
10572 | { |
10573 | switch (ppd->host_link_state) { |
10574 | case HLS_UP_INIT: |
10575 | case HLS_UP_ARMED: |
10576 | case HLS_UP_ACTIVE: |
10577 | return IB_PORTPHYSSTATE_LINKUP; |
10578 | case HLS_DN_POLL: |
10579 | return IB_PORTPHYSSTATE_POLLING; |
10580 | case HLS_DN_DISABLE: |
10581 | return IB_PORTPHYSSTATE_DISABLED; |
10582 | case HLS_DN_OFFLINE: |
10583 | return OPA_PORTPHYSSTATE_OFFLINE; |
10584 | case HLS_VERIFY_CAP: |
10585 | return IB_PORTPHYSSTATE_TRAINING; |
10586 | case HLS_GOING_UP: |
10587 | return IB_PORTPHYSSTATE_TRAINING; |
10588 | case HLS_GOING_OFFLINE: |
10589 | return OPA_PORTPHYSSTATE_OFFLINE; |
10590 | case HLS_LINK_COOLDOWN: |
10591 | return OPA_PORTPHYSSTATE_OFFLINE; |
10592 | case HLS_DN_DOWNDEF: |
10593 | default: |
10594 | dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n" , |
10595 | ppd->host_link_state); |
10596 | return -1; |
10597 | } |
10598 | } |
10599 | |
10600 | /* |
10601 | * driver_lstate - convert the driver's notion of a port's |
10602 | * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1 |
10603 | * (converted to a u32) to indicate error. |
10604 | */ |
10605 | u32 driver_lstate(struct hfi1_pportdata *ppd) |
10606 | { |
10607 | if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) |
10608 | return IB_PORT_DOWN; |
10609 | |
10610 | switch (ppd->host_link_state & HLS_UP) { |
10611 | case HLS_UP_INIT: |
10612 | return IB_PORT_INIT; |
10613 | case HLS_UP_ARMED: |
10614 | return IB_PORT_ARMED; |
10615 | case HLS_UP_ACTIVE: |
10616 | return IB_PORT_ACTIVE; |
10617 | default: |
10618 | dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n" , |
10619 | ppd->host_link_state); |
10620 | return -1; |
10621 | } |
10622 | } |
10623 | |
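| /*
|  * Latch the link down reasons only if none have been recorded yet;
|  * the first local/neighbor/remote reason triple wins until cleared.
|  */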
10624 | void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason, |
10625 | u8 neigh_reason, u8 rem_reason) |
10626 | { |
10627 | if (ppd->local_link_down_reason.latest == 0 && |
10628 | ppd->neigh_link_down_reason.latest == 0) { |
10629 | ppd->local_link_down_reason.latest = lcl_reason; |
10630 | ppd->neigh_link_down_reason.latest = neigh_reason; |
10631 | ppd->remote_link_down_reason = rem_reason; |
10632 | } |
10633 | } |
10634 | |
10635 | /** |
10636 | * data_vls_operational() - Verify if data VL BCT credits and MTU |
10637 | * are both set. |
10638 | * @ppd: pointer to hfi1_pportdata structure |
10639 | * |
10640 | * Return: true - OK, false - otherwise.
10641 | */ |
10642 | static inline bool data_vls_operational(struct hfi1_pportdata *ppd) |
10643 | { |
10644 | int i; |
10645 | u64 reg; |
10646 | |
10647 | if (!ppd->actual_vls_operational) |
10648 | return false; |
10649 | |
10650 | for (i = 0; i < ppd->vls_supported; i++) { |
10651 | reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10652 | if ((reg && !ppd->dd->vld[i].mtu) || |
10653 | (!reg && ppd->dd->vld[i].mtu)) |
10654 | return false; |
10655 | } |
10656 | |
10657 | return true; |
10658 | } |
10659 | |
10660 | /* |
10661 | * Change the physical and/or logical link state. |
10662 | * |
10663 | * Do not call this routine while inside an interrupt. It contains |
10664 | * calls to routines that can take multiple seconds to finish. |
10665 | * |
10666 | * Returns 0 on success, -errno on failure. |
10667 | */ |
10668 | int set_link_state(struct hfi1_pportdata *ppd, u32 state) |
10669 | { |
10670 | struct hfi1_devdata *dd = ppd->dd; |
10671 | struct ib_event event = {.device = NULL}; |
10672 | int ret1, ret = 0; |
10673 | int orig_new_state, poll_bounce; |
10674 | |
10675 | mutex_lock(&ppd->hls_lock); |
10676 | |
10677 | orig_new_state = state; |
10678 | if (state == HLS_DN_DOWNDEF) |
10679 | state = HLS_DEFAULT; |
10680 | |
10681 | /* interpret poll -> poll as a link bounce */ |
10682 | poll_bounce = ppd->host_link_state == HLS_DN_POLL && |
10683 | state == HLS_DN_POLL; |
10684 | |
10685 | dd_dev_info(dd, "%s: current %s, new %s %s%s\n" , __func__, |
10686 | link_state_name(ppd->host_link_state), |
10687 | link_state_name(orig_new_state), |
10688 | poll_bounce ? "(bounce) " : "" , |
10689 | link_state_reason_name(ppd, state)); |
10690 | |
10691 | /* |
10692 | * If we're going to a (HLS_*) link state that implies the logical |
10693 | * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then |
10694 | * reset is_sm_config_started to 0. |
10695 | */ |
10696 | if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE))) |
10697 | ppd->is_sm_config_started = 0; |
10698 | |
10699 | /* |
10700 | * Do nothing if the states match. Let a poll to poll link bounce |
10701 | * go through. |
10702 | */ |
10703 | if (ppd->host_link_state == state && !poll_bounce) |
10704 | goto done; |
10705 | |
10706 | switch (state) { |
10707 | case HLS_UP_INIT: |
10708 | if (ppd->host_link_state == HLS_DN_POLL && |
10709 | (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { |
10710 | /* |
10711 | * Quick link up jumps from polling to here. |
10712 | * |
10713 | * Whether in normal or loopback mode, the |
10714 | * simulator jumps from polling to link up. |
10715 | * Accept that here. |
10716 | */ |
10717 | /* OK */ |
10718 | } else if (ppd->host_link_state != HLS_GOING_UP) { |
10719 | goto unexpected; |
10720 | } |
10721 | |
10722 | /* |
10723 | * Wait for Link_Up physical state. |
10724 | * Physical and Logical states should already be |
10725 | * be transitioned to LinkUp and LinkInit respectively. |
10726 | */ |
10727 | ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10728 | if (ret) {
10729 | dd_dev_err(dd,
10730 | "%s: physical state did not change to LINK-UP\n",
10731 | __func__);
10732 | break; |
10733 | } |
10734 | |
10735 | ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10736 | if (ret) {
10737 | dd_dev_err(dd,
10738 | "%s: logical state did not change to INIT\n",
10739 | __func__);
10740 | break; |
10741 | } |
10742 | |
10743 | /* clear old transient LINKINIT_REASON code */ |
10744 | if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) |
10745 | ppd->linkinit_reason = |
10746 | OPA_LINKINIT_REASON_LINKUP; |
10747 | |
10748 | /* enable the port */ |
10749 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
10750 | |
10751 | handle_linkup_change(dd, 1);
10752 | pio_kernel_linkup(dd); |
10753 | |
10754 | /* |
10755 | * After link up, a new link width will have been set. |
10756 | * Update the xmit counters with regards to the new |
10757 | * link width. |
10758 | */ |
10759 | update_xmit_counters(ppd, ppd->link_width_active);
10760 | |
10761 | ppd->host_link_state = HLS_UP_INIT; |
10762 | update_statusp(ppd, IB_PORT_INIT);
10763 | break; |
10764 | case HLS_UP_ARMED: |
10765 | if (ppd->host_link_state != HLS_UP_INIT) |
10766 | goto unexpected; |
10767 | |
10768 | if (!data_vls_operational(ppd)) { |
10769 | dd_dev_err(dd, |
10770 | "%s: Invalid data VL credits or mtu\n" , |
10771 | __func__); |
10772 | ret = -EINVAL; |
10773 | break; |
10774 | } |
10775 | |
10776 | set_logical_state(dd, LSTATE_ARMED); |
10777 | ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10778 | if (ret) {
10779 | dd_dev_err(dd,
10780 | "%s: logical state did not change to ARMED\n",
10781 | __func__);
10782 | break; |
10783 | } |
10784 | ppd->host_link_state = HLS_UP_ARMED; |
10785 | update_statusp(ppd, IB_PORT_ARMED);
10786 | /* |
10787 | * The simulator does not currently implement SMA messages, |
10788 | * so neighbor_normal is not set. Set it here when we first |
10789 | * move to Armed. |
10790 | */ |
10791 | if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) |
10792 | ppd->neighbor_normal = 1; |
10793 | break; |
10794 | case HLS_UP_ACTIVE: |
10795 | if (ppd->host_link_state != HLS_UP_ARMED) |
10796 | goto unexpected; |
10797 | |
10798 | set_logical_state(dd, LSTATE_ACTIVE); |
10799 | ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10800 | if (ret) {
10801 | dd_dev_err(dd,
10802 | "%s: logical state did not change to ACTIVE\n",
10803 | __func__);
10804 | } else { |
10805 | /* tell all engines to go running */ |
10806 | sdma_all_running(dd); |
10807 | ppd->host_link_state = HLS_UP_ACTIVE; |
10808 | update_statusp(ppd, IB_PORT_ACTIVE);
10809 |
10810 | /* Signal the IB layer that the port has gone active */
10811 | event.device = &dd->verbs_dev.rdi.ibdev; |
10812 | event.element.port_num = ppd->port; |
10813 | event.event = IB_EVENT_PORT_ACTIVE; |
10814 | } |
10815 | break; |
10816 | case HLS_DN_POLL: |
10817 | if ((ppd->host_link_state == HLS_DN_DISABLE || |
10818 | ppd->host_link_state == HLS_DN_OFFLINE) && |
10819 | dd->dc_shutdown) |
10820 | dc_start(dd); |
10821 | /* Hand LED control to the DC */ |
10822 | write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10823 | |
10824 | if (ppd->host_link_state != HLS_DN_OFFLINE) { |
10825 | u8 tmp = ppd->link_enabled; |
10826 | |
10827 | ret = goto_offline(ppd, ppd->remote_link_down_reason);
10828 | if (ret) { |
10829 | ppd->link_enabled = tmp; |
10830 | break; |
10831 | } |
10832 | ppd->remote_link_down_reason = 0; |
10833 | |
10834 | if (ppd->driver_link_ready) |
10835 | ppd->link_enabled = 1; |
10836 | } |
10837 | |
10838 | set_all_slowpath(ppd->dd); |
10839 | ret = set_local_link_attributes(ppd); |
10840 | if (ret) |
10841 | break; |
10842 | |
10843 | ppd->port_error_action = 0; |
10844 | |
10845 | if (quick_linkup) { |
10846 | /* quick linkup does not go into polling */ |
10847 | ret = do_quick_linkup(dd); |
10848 | } else { |
10849 | ret1 = set_physical_link_state(dd, PLS_POLLING); |
10850 | if (!ret1) |
10851 | ret1 = wait_phys_link_out_of_offline(ppd,
10852 | 3000);
10853 | if (ret1 != HCMD_SUCCESS) {
10854 | dd_dev_err(dd,
10855 | "Failed to transition to Polling link state, return 0x%x\n",
10856 | ret1);
10857 | ret = -EINVAL; |
10858 | } |
10859 | } |
10860 | |
10861 | /* |
10862 | * Change the host link state after requesting DC8051 to |
10863 | * change its physical state so that we can ignore any |
10864 | * interrupt with stale LNI(XX) error, which will not be |
10865 | * cleared until DC8051 transitions to Polling state. |
10866 | */ |
10867 | ppd->host_link_state = HLS_DN_POLL; |
10868 | ppd->offline_disabled_reason = |
10869 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); |
10870 | /* |
10871 | * If an error occurred above, go back to offline. The |
10872 | * caller may reschedule another attempt. |
10873 | */ |
10874 | if (ret) |
10875 | goto_offline(ppd, 0);
10876 | else |
10877 | log_physical_state(ppd, PLS_POLLING); |
10878 | break; |
10879 | case HLS_DN_DISABLE: |
10880 | /* link is disabled */ |
10881 | ppd->link_enabled = 0; |
10882 | |
10883 | /* allow any state to transition to disabled */ |
10884 | |
10885 | /* must transition to offline first */ |
10886 | if (ppd->host_link_state != HLS_DN_OFFLINE) { |
10887 | ret = goto_offline(ppd, ppd->remote_link_down_reason);
10888 | if (ret) |
10889 | break; |
10890 | ppd->remote_link_down_reason = 0; |
10891 | } |
10892 | |
10893 | if (!dd->dc_shutdown) { |
10894 | ret1 = set_physical_link_state(dd, PLS_DISABLED); |
10895 | if (ret1 != HCMD_SUCCESS) { |
10896 | dd_dev_err(dd, |
10897 | "Failed to transition to Disabled link state, return 0x%x\n" , |
10898 | ret1); |
10899 | ret = -EINVAL; |
10900 | break; |
10901 | } |
10902 | ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10903 | if (ret) {
10904 | dd_dev_err(dd,
10905 | "%s: physical state did not change to DISABLED\n",
10906 | __func__);
10907 | break; |
10908 | } |
10909 | dc_shutdown(dd); |
10910 | } |
10911 | ppd->host_link_state = HLS_DN_DISABLE; |
10912 | break; |
10913 | case HLS_DN_OFFLINE: |
10914 | if (ppd->host_link_state == HLS_DN_DISABLE) |
10915 | dc_start(dd); |
10916 | |
10917 | /* allow any state to transition to offline */ |
10918 | ret = goto_offline(ppd, ppd->remote_link_down_reason);
10919 | if (!ret) |
10920 | ppd->remote_link_down_reason = 0; |
10921 | break; |
10922 | case HLS_VERIFY_CAP: |
10923 | if (ppd->host_link_state != HLS_DN_POLL) |
10924 | goto unexpected; |
10925 | ppd->host_link_state = HLS_VERIFY_CAP; |
10926 | log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP); |
10927 | break; |
10928 | case HLS_GOING_UP: |
10929 | if (ppd->host_link_state != HLS_VERIFY_CAP) |
10930 | goto unexpected; |
10931 | |
10932 | ret1 = set_physical_link_state(dd, PLS_LINKUP); |
10933 | if (ret1 != HCMD_SUCCESS) { |
10934 | dd_dev_err(dd, |
10935 | "Failed to transition to link up state, return 0x%x\n" , |
10936 | ret1); |
10937 | ret = -EINVAL; |
10938 | break; |
10939 | } |
10940 | ppd->host_link_state = HLS_GOING_UP; |
10941 | break; |
10942 | |
10943 | case HLS_GOING_OFFLINE: /* transient within goto_offline() */ |
10944 | case HLS_LINK_COOLDOWN: /* transient within goto_offline() */ |
10945 | default: |
10946 | dd_dev_info(dd, "%s: state 0x%x: not supported\n" , |
10947 | __func__, state); |
10948 | ret = -EINVAL; |
10949 | break; |
10950 | } |
10951 | |
10952 | goto done; |
10953 | |
10954 | unexpected: |
10955 | dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n" , |
10956 | __func__, link_state_name(ppd->host_link_state), |
10957 | link_state_name(state)); |
10958 | ret = -EINVAL; |
10959 | |
10960 | done: |
10961 | mutex_unlock(&ppd->hls_lock);
10962 | |
10963 | if (event.device) |
10964 | ib_dispatch_event(&event);
10965 | |
10966 | return ret; |
10967 | } |
10968 | |
10969 | int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) |
10970 | { |
10971 | u64 reg; |
10972 | int ret = 0; |
10973 | |
10974 | switch (which) { |
10975 | case HFI1_IB_CFG_LIDLMC: |
10976 | set_lidlmc(ppd); |
10977 | break; |
10978 | case HFI1_IB_CFG_VL_HIGH_LIMIT: |
10979 | /* |
10980 | * The VL Arbitrator high limit is sent in units of 4k |
10981 | * bytes, while HFI stores it in units of 64 bytes. |
10982 | */ |
10983 | val *= 4096 / 64; |
10984 | reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK) |
10985 | << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT; |
10986 | write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10987 | break; |
10988 | case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ |
10989 | /* HFI only supports POLL as the default link down state */ |
10990 | if (val != HLS_DN_POLL) |
10991 | ret = -EINVAL; |
10992 | break; |
10993 | case HFI1_IB_CFG_OP_VLS: |
10994 | if (ppd->vls_operational != val) { |
10995 | ppd->vls_operational = val; |
10996 | if (!ppd->port) |
10997 | ret = -EINVAL; |
10998 | } |
10999 | break; |
11000 | /* |
11001 | * For link width, link width downgrade, and speed enable, always AND |
11002 | * the setting with what is actually supported. This has two benefits. |
11003 | * First, enabled can't have unsupported values, no matter what the |
11004 | * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean |
11005 | * "fill in with your supported value" have all the bits in the |
11006 | * field set, so simply ANDing with supported has the desired result. |
11007 | */ |
11008 | case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ |
11009 | ppd->link_width_enabled = val & ppd->link_width_supported; |
11010 | break; |
11011 | case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */ |
11012 | ppd->link_width_downgrade_enabled = |
11013 | val & ppd->link_width_downgrade_supported; |
11014 | break; |
11015 | case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ |
11016 | ppd->link_speed_enabled = val & ppd->link_speed_supported; |
11017 | break; |
11018 | case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ |
11019 | /* |
11020 | * HFI does not follow IB specs, save this value |
11021 | * so we can report it, if asked. |
11022 | */ |
11023 | ppd->overrun_threshold = val; |
11024 | break; |
11025 | case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ |
11026 | /* |
11027 | * HFI does not follow IB specs, save this value |
11028 | * so we can report it, if asked. |
11029 | */ |
11030 | ppd->phy_error_threshold = val; |
11031 | break; |
11032 | |
11033 | case HFI1_IB_CFG_MTU: |
11034 | set_send_length(ppd); |
11035 | break; |
11036 | |
11037 | case HFI1_IB_CFG_PKEYS: |
11038 | if (HFI1_CAP_IS_KSET(PKEY_CHECK)) |
11039 | set_partition_keys(ppd); |
11040 | break; |
11041 | |
11042 | default: |
11043 | if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) |
11044 | dd_dev_info(ppd->dd, |
11045 | "%s: which %s, val 0x%x: not implemented\n" , |
11046 | __func__, ib_cfg_name(which), val); |
11047 | break; |
11048 | } |
11049 | return ret; |
11050 | } |
11051 | |
11052 | /* begin functions related to vl arbitration table caching */ |
11053 | static void init_vl_arb_caches(struct hfi1_pportdata *ppd) |
11054 | { |
11055 | int i; |
11056 | |
11057 | BUILD_BUG_ON(VL_ARB_TABLE_SIZE != |
11058 | VL_ARB_LOW_PRIO_TABLE_SIZE); |
11059 | BUILD_BUG_ON(VL_ARB_TABLE_SIZE != |
11060 | VL_ARB_HIGH_PRIO_TABLE_SIZE); |
11061 | |
11062 | /* |
11063 | * Note that we always return values directly from the |
11064 | * 'vl_arb_cache' (and do no CSR reads) in response to a |
11065 | * 'Get(VLArbTable)'. This is obviously correct after a |
11066 | * 'Set(VLArbTable)', since the cache will then be up to |
11067 | * date. But it's also correct prior to any 'Set(VLArbTable)' |
11068 | * since then both the cache, and the relevant h/w registers |
11069 | * will be zeroed. |
11070 | */ |
11071 | |
11072 | for (i = 0; i < MAX_PRIO_TABLE; i++) |
11073 | spin_lock_init(&ppd->vl_arb_cache[i].lock); |
11074 | } |
11075 | |
11076 | /* |
11077 | * vl_arb_lock_cache |
11078 | * |
11079 | * All other vl_arb_* functions should be called only after locking |
11080 | * the cache. |
11081 | */ |
11082 | static inline struct vl_arb_cache * |
11083 | vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx) |
11084 | { |
11085 | if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE) |
11086 | return NULL; |
11087 | spin_lock(&ppd->vl_arb_cache[idx].lock);
11088 | return &ppd->vl_arb_cache[idx]; |
11089 | } |
11090 | |
11091 | static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx) |
11092 | { |
11093 | spin_unlock(&ppd->vl_arb_cache[idx].lock);
11094 | } |
11095 | |
11096 | static void vl_arb_get_cache(struct vl_arb_cache *cache, |
11097 | struct ib_vl_weight_elem *vl) |
11098 | { |
11099 | memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); |
11100 | } |
11101 | |
11102 | static void vl_arb_set_cache(struct vl_arb_cache *cache, |
11103 | struct ib_vl_weight_elem *vl) |
11104 | { |
11105 | memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); |
11106 | } |
11107 | |
11108 | static int vl_arb_match_cache(struct vl_arb_cache *cache, |
11109 | struct ib_vl_weight_elem *vl) |
11110 | { |
11111 | return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11112 | } |
11113 | |
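| /*
|  * Typical read under the cache lock (the pattern used by
|  * fm_get_table() below):
|  *
|  *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
|  *	vl_arb_get_cache(vlc, t);
|  *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
|  */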
11114 | /* end functions related to vl arbitration table caching */ |
11115 | |
11116 | static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target, |
11117 | u32 size, struct ib_vl_weight_elem *vl) |
11118 | { |
11119 | struct hfi1_devdata *dd = ppd->dd; |
11120 | u64 reg; |
11121 | unsigned int i, is_up = 0; |
11122 | int drain, ret = 0; |
11123 | |
11124 | mutex_lock(&ppd->hls_lock); |
11125 | |
11126 | if (ppd->host_link_state & HLS_UP) |
11127 | is_up = 1; |
11128 | |
11129 | drain = !is_ax(dd) && is_up; |
11130 | |
11131 | if (drain) |
11132 | /* |
11133 | * Before adjusting VL arbitration weights, empty per-VL |
11134 | * FIFOs, otherwise a packet whose VL weight is being |
11135 | * set to 0 could get stuck in a FIFO with no chance to |
11136 | * egress. |
11137 | */ |
11138 | ret = stop_drain_data_vls(dd); |
11139 | |
11140 | if (ret) { |
11141 | dd_dev_err( |
11142 | dd, |
11143 | "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n" , |
11144 | __func__); |
11145 | goto err; |
11146 | } |
11147 | |
11148 | for (i = 0; i < size; i++, vl++) { |
11149 | /* |
11150 | * NOTE: The low priority shift and mask are used here, but |
11151 | * they are the same for both the low and high registers. |
11152 | */ |
11153 | reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) |
11154 | << SEND_LOW_PRIORITY_LIST_VL_SHIFT) |
11155 | | (((u64)vl->weight |
11156 | & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK) |
11157 | << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT); |
11158 | write_csr(dd, target + (i * 8), reg);
11159 | } |
11160 | pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); |
11161 | |
11162 | if (drain) |
11163 | open_fill_data_vls(dd); /* reopen all VLs */ |
11164 | |
11165 | err: |
11166 | mutex_unlock(lock: &ppd->hls_lock); |
11167 | |
11168 | return ret; |
11169 | } |
11170 | |
11171 | /* |
11172 | * Read one credit merge VL register. |
11173 | */ |
11174 | static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, |
11175 | struct vl_limit *vll) |
11176 | { |
11177 | u64 reg = read_csr(dd, csr);
11178 | |
11179 | vll->dedicated = cpu_to_be16( |
11180 | (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT) |
11181 | & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK); |
11182 | vll->shared = cpu_to_be16( |
11183 | (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT) |
11184 | & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK); |
11185 | } |
11186 | |
11187 | /* |
11188 | * Read the current credit merge limits. |
11189 | */ |
11190 | static int get_buffer_control(struct hfi1_devdata *dd, |
11191 | struct buffer_control *bc, u16 *overall_limit) |
11192 | { |
11193 | u64 reg; |
11194 | int i; |
11195 | |
11196 | /* not all entries are filled in */ |
11197 | memset(bc, 0, sizeof(*bc)); |
11198 | |
11199 | /* OPA and HFI have a 1-1 mapping */ |
11200 | for (i = 0; i < TXE_NUM_DATA_VL; i++) |
11201 | read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11202 | |
11203 | /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ |
11204 | read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11205 | |
11206 | reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
11207 | bc->overall_shared_limit = cpu_to_be16( |
11208 | (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
11209 | & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK); |
11210 | if (overall_limit) |
11211 | *overall_limit = (reg |
11212 | >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
11213 | & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK; |
11214 | return sizeof(struct buffer_control); |
11215 | } |
11216 | |
11217 | static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) |
11218 | { |
11219 | u64 reg; |
11220 | int i; |
11221 | |
11222 | /* each register contains 16 SC->VLnt mappings, 4 bits each */ |
11223 | reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0); |
11224 | for (i = 0; i < sizeof(u64); i++) { |
11225 | u8 byte = *(((u8 *)®) + i); |
11226 | |
11227 | dp->vlnt[2 * i] = byte & 0xf; |
11228 | dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; |
11229 | } |
11230 | |
11231 | reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16); |
11232 | for (i = 0; i < sizeof(u64); i++) { |
11233 | u8 byte = *(((u8 *)®) + i); |
11234 | |
11235 | dp->vlnt[16 + (2 * i)] = byte & 0xf; |
11236 | dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; |
11237 | } |
11238 | return sizeof(struct sc2vlnt); |
11239 | } |
11240 | |
11241 | static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, |
11242 | struct ib_vl_weight_elem *vl) |
11243 | { |
11244 | unsigned int i; |
11245 | |
11246 | for (i = 0; i < nelems; i++, vl++) { |
11247 | vl->vl = 0xf; |
11248 | vl->weight = 0; |
11249 | } |
11250 | } |
11251 | |
11252 | static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) |
11253 | { |
11254 | write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, |
11255 | DC_SC_VL_VAL(15_0, |
11256 | 0, dp->vlnt[0] & 0xf, |
11257 | 1, dp->vlnt[1] & 0xf, |
11258 | 2, dp->vlnt[2] & 0xf, |
11259 | 3, dp->vlnt[3] & 0xf, |
11260 | 4, dp->vlnt[4] & 0xf, |
11261 | 5, dp->vlnt[5] & 0xf, |
11262 | 6, dp->vlnt[6] & 0xf, |
11263 | 7, dp->vlnt[7] & 0xf, |
11264 | 8, dp->vlnt[8] & 0xf, |
11265 | 9, dp->vlnt[9] & 0xf, |
11266 | 10, dp->vlnt[10] & 0xf, |
11267 | 11, dp->vlnt[11] & 0xf, |
11268 | 12, dp->vlnt[12] & 0xf, |
11269 | 13, dp->vlnt[13] & 0xf, |
11270 | 14, dp->vlnt[14] & 0xf, |
11271 | 15, dp->vlnt[15] & 0xf)); |
11272 | write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, |
11273 | DC_SC_VL_VAL(31_16, |
11274 | 16, dp->vlnt[16] & 0xf, |
11275 | 17, dp->vlnt[17] & 0xf, |
11276 | 18, dp->vlnt[18] & 0xf, |
11277 | 19, dp->vlnt[19] & 0xf, |
11278 | 20, dp->vlnt[20] & 0xf, |
11279 | 21, dp->vlnt[21] & 0xf, |
11280 | 22, dp->vlnt[22] & 0xf, |
11281 | 23, dp->vlnt[23] & 0xf, |
11282 | 24, dp->vlnt[24] & 0xf, |
11283 | 25, dp->vlnt[25] & 0xf, |
11284 | 26, dp->vlnt[26] & 0xf, |
11285 | 27, dp->vlnt[27] & 0xf, |
11286 | 28, dp->vlnt[28] & 0xf, |
11287 | 29, dp->vlnt[29] & 0xf, |
11288 | 30, dp->vlnt[30] & 0xf, |
11289 | 31, dp->vlnt[31] & 0xf)); |
11290 | } |
11291 | |
11292 | static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, |
11293 | u16 limit) |
11294 | { |
11295 | if (limit != 0) |
11296 | dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n" , |
11297 | what, (int)limit, idx); |
11298 | } |
11299 | |
11300 | /* change only the shared limit portion of SendCmGlobalCredit */
11301 | static void set_global_shared(struct hfi1_devdata *dd, u16 limit) |
11302 | { |
11303 | u64 reg; |
11304 | |
11305 | reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
11306 | reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK; |
11307 | reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT; |
11308 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11309 | } |
11310 | |
11311 | /* change only the total credit limit portion of SendCmGlobalCredit */
11312 | static void set_global_limit(struct hfi1_devdata *dd, u16 limit) |
11313 | { |
11314 | u64 reg; |
11315 | |
11316 | reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
11317 | reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK; |
11318 | reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; |
11319 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11320 | } |
11321 | |
11322 | /* set the given per-VL shared limit */ |
11323 | static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) |
11324 | { |
11325 | u64 reg; |
11326 | u32 addr; |
11327 | |
11328 | if (vl < TXE_NUM_DATA_VL) |
11329 | addr = SEND_CM_CREDIT_VL + (8 * vl); |
11330 | else |
11331 | addr = SEND_CM_CREDIT_VL15; |
11332 | |
11333 | reg = read_csr(dd, addr);
11334 | reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11335 | reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11336 | write_csr(dd, addr, reg);
11337 | } |
11338 | |
11339 | /* set the given per-VL dedicated limit */ |
11340 | static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) |
11341 | { |
11342 | u64 reg; |
11343 | u32 addr; |
11344 | |
11345 | if (vl < TXE_NUM_DATA_VL) |
11346 | addr = SEND_CM_CREDIT_VL + (8 * vl); |
11347 | else |
11348 | addr = SEND_CM_CREDIT_VL15; |
11349 | |
11350 | reg = read_csr(dd, addr);
11351 | reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11352 | reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11353 | write_csr(dd, addr, reg);
11354 | } |
11355 | |
11356 | /* spin until the given per-VL status mask bits clear */ |
11357 | static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, |
11358 | const char *which) |
11359 | { |
11360 | unsigned long timeout; |
11361 | u64 reg; |
11362 | |
11363 | timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT); |
11364 | while (1) { |
11365 | reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; |
11366 | |
11367 | if (reg == 0) |
11368 | return; /* success */ |
11369 | if (time_after(jiffies, timeout)) |
11370 | break; /* timed out */ |
11371 | udelay(1); |
11372 | } |
11373 | |
11374 | dd_dev_err(dd, |
11375 | "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n" , |
11376 | which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); |
11377 | /* |
11378 | * If this occurs, it is likely there was a credit loss on the link. |
11379 | * The only recovery from that is a link bounce. |
11380 | */ |
11381 | dd_dev_err(dd, |
11382 | "Continuing anyway. A credit loss may occur. Suggest a link bounce\n" ); |
11383 | } |
11384 | |
11385 | /* |
11386 | * The number of credits on the VLs may be changed while everything |
11387 | * is "live", but the following algorithm must be followed due to |
11388 | * how the hardware is actually implemented. In particular, |
11389 | * Return_Credit_Status[] is the only correct status check. |
11390 | * |
11391 | * if (reducing Global_Shared_Credit_Limit or any shared limit changing) |
11392 | * set Global_Shared_Credit_Limit = 0 |
11393 | * use_all_vl = 1 |
11394 | * mask0 = all VLs that are changing either dedicated or shared limits |
11395 | * set Shared_Limit[mask0] = 0 |
11396 | * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0 |
11397 | * if (changing any dedicated limit) |
11398 | * mask1 = all VLs that are lowering dedicated limits |
11399 | * lower Dedicated_Limit[mask1] |
11400 | * spin until Return_Credit_Status[mask1] == 0 |
11401 | * raise Dedicated_Limits |
11402 | * raise Shared_Limits |
11403 | * raise Global_Shared_Credit_Limit |
11404 | * |
11405 | * lower = if the new limit is lower, set the limit to the new value |
11406 | * raise = if the new limit is higher than the current value (may be changed |
11407 | * earlier in the algorithm), set the new limit to the new value |
11408 | */ |
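| /*
|  * Illustration (hypothetical change): lowering only VL0's dedicated
|  * limit first sets VL0's shared limit to 0, spins until VL0's
|  * Return_Credit_Status clears, writes the lower dedicated limit and
|  * waits again, then restores VL0's shared limit in the raise passes.
|  */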
11409 | int set_buffer_control(struct hfi1_pportdata *ppd, |
11410 | struct buffer_control *new_bc) |
11411 | { |
11412 | struct hfi1_devdata *dd = ppd->dd; |
11413 | u64 changing_mask, ld_mask, stat_mask; |
11414 | int change_count; |
11415 | int i, use_all_mask; |
11416 | int this_shared_changing; |
11417 | int vl_count = 0, ret; |
11418 | /* |
11419 | * A0: add the variable any_shared_limit_changing below and in the |
11420 | * algorithm above. If removing A0 support, it can be removed. |
11421 | */ |
11422 | int any_shared_limit_changing; |
11423 | struct buffer_control cur_bc; |
11424 | u8 changing[OPA_MAX_VLS]; |
11425 | u8 lowering_dedicated[OPA_MAX_VLS]; |
11426 | u16 cur_total; |
11427 | u32 new_total = 0; |
11428 | const u64 all_mask = |
11429 | SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK |
11430 | | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK |
11431 | | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK |
11432 | | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK |
11433 | | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK |
11434 | | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK |
11435 | | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK |
11436 | | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK |
11437 | | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK; |
11438 | |
11439 | #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) |
11440 | #define NUM_USABLE_VLS 16 /* look at VL15 and less */ |
11441 | |
11442 | /* find the new total credits, do sanity check on unused VLs */ |
11443 | for (i = 0; i < OPA_MAX_VLS; i++) { |
11444 | if (valid_vl(i)) { |
11445 | new_total += be16_to_cpu(new_bc->vl[i].dedicated); |
11446 | continue; |
11447 | } |
11448 | nonzero_msg(dd, i, "dedicated",
11449 | be16_to_cpu(new_bc->vl[i].dedicated));
11450 | nonzero_msg(dd, i, "shared",
11451 | be16_to_cpu(new_bc->vl[i].shared));
11452 | new_bc->vl[i].dedicated = 0; |
11453 | new_bc->vl[i].shared = 0; |
11454 | } |
11455 | new_total += be16_to_cpu(new_bc->overall_shared_limit); |
11456 | |
11457 | /* fetch the current values */ |
11458 | get_buffer_control(dd, &cur_bc, &cur_total);
11459 | |
11460 | /* |
11461 | * Create the masks we will use. |
11462 | */ |
11463 | memset(changing, 0, sizeof(changing)); |
11464 | memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); |
11465 | /* |
11466 | * NOTE: Assumes that the individual VL bits are adjacent and in |
11467 | * increasing order |
11468 | */ |
11469 | stat_mask = |
11470 | SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; |
11471 | changing_mask = 0; |
11472 | ld_mask = 0; |
11473 | change_count = 0; |
11474 | any_shared_limit_changing = 0; |
11475 | for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) { |
11476 | if (!valid_vl(i)) |
11477 | continue; |
11478 | this_shared_changing = new_bc->vl[i].shared |
11479 | != cur_bc.vl[i].shared; |
11480 | if (this_shared_changing) |
11481 | any_shared_limit_changing = 1; |
11482 | if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || |
11483 | this_shared_changing) { |
11484 | changing[i] = 1; |
11485 | changing_mask |= stat_mask; |
11486 | change_count++; |
11487 | } |
11488 | if (be16_to_cpu(new_bc->vl[i].dedicated) < |
11489 | be16_to_cpu(cur_bc.vl[i].dedicated)) { |
11490 | lowering_dedicated[i] = 1; |
11491 | ld_mask |= stat_mask; |
11492 | } |
11493 | } |
11494 | |
11495 | /* bracket the credit change with a total adjustment */ |
11496 | if (new_total > cur_total) |
11497 | set_global_limit(dd, new_total);
11498 | |
11499 | /* |
11500 | * Start the credit change algorithm. |
11501 | */ |
11502 | use_all_mask = 0; |
11503 | if ((be16_to_cpu(new_bc->overall_shared_limit) < |
11504 | be16_to_cpu(cur_bc.overall_shared_limit)) || |
11505 | (is_ax(dd) && any_shared_limit_changing)) { |
11506 | set_global_shared(dd, 0);
11507 | cur_bc.overall_shared_limit = 0; |
11508 | use_all_mask = 1; |
11509 | } |
11510 | |
11511 | for (i = 0; i < NUM_USABLE_VLS; i++) { |
11512 | if (!valid_vl(i)) |
11513 | continue; |
11514 | |
11515 | if (changing[i]) { |
11516 | set_vl_shared(dd, i, 0);
11517 | cur_bc.vl[i].shared = 0; |
11518 | } |
11519 | } |
11520 | |
11521 | wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11522 | "shared");
11523 | |
11524 | if (change_count > 0) { |
11525 | for (i = 0; i < NUM_USABLE_VLS; i++) { |
11526 | if (!valid_vl(i)) |
11527 | continue; |
11528 | |
11529 | if (lowering_dedicated[i]) { |
11530 | set_vl_dedicated(dd, i,
11531 | be16_to_cpu(new_bc-> |
11532 | vl[i].dedicated)); |
11533 | cur_bc.vl[i].dedicated = |
11534 | new_bc->vl[i].dedicated; |
11535 | } |
11536 | } |
11537 | |
11538 | wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11539 | |
11540 | /* now raise all dedicated that are going up */ |
11541 | for (i = 0; i < NUM_USABLE_VLS; i++) { |
11542 | if (!valid_vl(i)) |
11543 | continue; |
11544 | |
11545 | if (be16_to_cpu(new_bc->vl[i].dedicated) > |
11546 | be16_to_cpu(cur_bc.vl[i].dedicated)) |
11547 | set_vl_dedicated(dd, i,
11548 | be16_to_cpu(new_bc-> |
11549 | vl[i].dedicated)); |
11550 | } |
11551 | } |
11552 | |
11553 | /* next raise all shared that are going up */ |
11554 | for (i = 0; i < NUM_USABLE_VLS; i++) { |
11555 | if (!valid_vl(i)) |
11556 | continue; |
11557 | |
11558 | if (be16_to_cpu(new_bc->vl[i].shared) > |
11559 | be16_to_cpu(cur_bc.vl[i].shared)) |
11560 | set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11561 | } |
11562 | |
11563 | /* finally raise the global shared */ |
11564 | if (be16_to_cpu(new_bc->overall_shared_limit) > |
11565 | be16_to_cpu(cur_bc.overall_shared_limit)) |
11566 | set_global_shared(dd, |
11567 | be16_to_cpu(new_bc->overall_shared_limit)); |
11568 | |
11569 | /* bracket the credit change with a total adjustment */ |
11570 | if (new_total < cur_total) |
11571 | set_global_limit(dd, new_total);
11572 | |
11573 | /* |
11574 | * Determine the actual number of operational VLS using the number of |
11575 | * dedicated and shared credits for each VL. |
11576 | */ |
11577 | if (change_count > 0) { |
11578 | for (i = 0; i < TXE_NUM_DATA_VL; i++) |
11579 | if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || |
11580 | be16_to_cpu(new_bc->vl[i].shared) > 0) |
11581 | vl_count++; |
11582 | ppd->actual_vls_operational = vl_count; |
11583 | ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11584 | ppd->actual_vls_operational :
11585 | ppd->vls_operational,
11586 | NULL);
11587 | if (ret == 0)
11588 | ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11589 | ppd->actual_vls_operational :
11590 | ppd->vls_operational, NULL);
11591 | if (ret) |
11592 | return ret; |
11593 | } |
11594 | return 0; |
11595 | } |
11596 | |
11597 | /* |
11598 | * Read the given fabric manager table. Return the size of the |
11599 | * table (in bytes) on success, and a negative error code on |
11600 | * failure. |
11601 | */ |
11602 | int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t) |
11603 | |
11604 | { |
11605 | int size; |
11606 | struct vl_arb_cache *vlc; |
11607 | |
11608 | switch (which) { |
11609 | case FM_TBL_VL_HIGH_ARB: |
11610 | size = 256; |
11611 | /* |
11612 | * OPA specifies 128 elements (of 2 bytes each), though |
11613 | * HFI supports only 16 elements in h/w. |
11614 | */ |
11615 | vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11616 | vl_arb_get_cache(vlc, t);
11617 | vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11618 | break; |
11619 | case FM_TBL_VL_LOW_ARB: |
11620 | size = 256; |
11621 | /* |
11622 | * OPA specifies 128 elements (of 2 bytes each), though |
11623 | * HFI supports only 16 elements in h/w. |
11624 | */ |
11625 | vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11626 | vl_arb_get_cache(vlc, t);
11627 | vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11628 | break; |
11629 | case FM_TBL_BUFFER_CONTROL: |
11630 | size = get_buffer_control(ppd->dd, t, NULL);
11631 | break; |
11632 | case FM_TBL_SC2VLNT: |
11633 | size = get_sc2vlnt(ppd->dd, t);
11634 | break; |
11635 | case FM_TBL_VL_PREEMPT_ELEMS: |
11636 | size = 256; |
11637 | /* OPA specifies 128 elements, of 2 bytes each */ |
11638 | get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11639 | break; |
11640 | case FM_TBL_VL_PREEMPT_MATRIX: |
11641 | size = 256; |
11642 | /* |
11643 | * OPA specifies that this is the same size as the VL |
11644 | * arbitration tables (i.e., 256 bytes). |
11645 | */ |
11646 | break; |
11647 | default: |
11648 | return -EINVAL; |
11649 | } |
11650 | return size; |
11651 | } |
11652 | |
11653 | /* |
11654 | * Write the given fabric manager table. |
11655 | */ |
11656 | int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t) |
11657 | { |
11658 | int ret = 0; |
11659 | struct vl_arb_cache *vlc; |
11660 | |
11661 | switch (which) { |
11662 | case FM_TBL_VL_HIGH_ARB: |
11663 | vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11664 | if (vl_arb_match_cache(vlc, t)) {
11665 | vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11666 | break;
11667 | }
11668 | vl_arb_set_cache(vlc, t);
11669 | vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11670 | ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11671 | VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11672 | break; |
11673 | case FM_TBL_VL_LOW_ARB: |
11674 | vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11675 | if (vl_arb_match_cache(vlc, t)) {
11676 | vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11677 | break;
11678 | }
11679 | vl_arb_set_cache(vlc, t);
11680 | vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11681 | ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11682 | VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11683 | break; |
11684 | case FM_TBL_BUFFER_CONTROL: |
11685 | ret = set_buffer_control(ppd, t);
11686 | break; |
11687 | case FM_TBL_SC2VLNT: |
11688 | set_sc2vlnt(ppd->dd, t);
11689 | break; |
11690 | default: |
11691 | ret = -EINVAL; |
11692 | } |
11693 | return ret; |
11694 | } |
11695 | |
11696 | /* |
11697 | * Disable all data VLs. |
11698 | * |
11699 | * Return 0 if disabled, non-zero if the VLs cannot be disabled. |
11700 | */ |
11701 | static int disable_data_vls(struct hfi1_devdata *dd) |
11702 | { |
11703 | if (is_ax(dd)) |
11704 | return 1; |
11705 | |
11706 | pio_send_control(dd, PSC_DATA_VL_DISABLE); |
11707 | |
11708 | return 0; |
11709 | } |
11710 | |
11711 | /* |
11712 | * open_fill_data_vls() - the counterpart to stop_drain_data_vls(). |
11713 | * Just re-enables all data VLs (the "fill" part happens |
11714 | * automatically - the name was chosen for symmetry with |
11715 | * stop_drain_data_vls()). |
11716 | * |
11717 | * Return 0 if successful, non-zero if the VLs cannot be enabled. |
11718 | */ |
11719 | int open_fill_data_vls(struct hfi1_devdata *dd) |
11720 | { |
11721 | if (is_ax(dd)) |
11722 | return 1; |
11723 | |
11724 | pio_send_control(dd, PSC_DATA_VL_ENABLE); |
11725 | |
11726 | return 0; |
11727 | } |
11728 | |
11729 | /* |
11730 | * drain_data_vls() - assumes that disable_data_vls() has been called, |
11731 | * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA |
11732 | * engines to drop to 0. |
11733 | */ |
11734 | static void drain_data_vls(struct hfi1_devdata *dd) |
11735 | { |
11736 | sc_wait(dd); |
11737 | sdma_wait(dd); |
11738 | pause_for_credit_return(dd); |
11739 | } |
11740 | |
11741 | /* |
11742 | * stop_drain_data_vls() - disable, then drain all per-VL fifos. |
11743 | * |
11744 | * Use open_fill_data_vls() to resume using data VLs. This pair is |
11745 | * meant to be used like this: |
11746 | * |
11747 | * stop_drain_data_vls(dd); |
11748 | * // do things with per-VL resources |
11749 | * open_fill_data_vls(dd); |
11750 | */ |
11751 | int stop_drain_data_vls(struct hfi1_devdata *dd) |
11752 | { |
11753 | int ret; |
11754 | |
11755 | ret = disable_data_vls(dd); |
11756 | if (ret == 0) |
11757 | drain_data_vls(dd); |
11758 | |
11759 | return ret; |
11760 | } |
11761 | |
11762 | /* |
11763 | * Convert a nanosecond time to a cclock count. No matter how slow |
11764 | * the cclock, a non-zero ns will always have a non-zero result. |
11765 | */ |
11766 | u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns) |
11767 | { |
11768 | u32 cclocks; |
11769 | |
11770 | if (dd->icode == ICODE_FPGA_EMULATION) |
11771 | cclocks = (ns * 1000) / FPGA_CCLOCK_PS; |
11772 | else /* simulation pretends to be ASIC */ |
11773 | cclocks = (ns * 1000) / ASIC_CCLOCK_PS; |
11774 | if (ns && !cclocks) /* if ns nonzero, must be at least 1 */ |
11775 | cclocks = 1; |
11776 | return cclocks; |
11777 | } |
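| /*
|  * Example: ns_to_cclock(dd, 1) never returns 0; even if the integer
|  * division yields 0, the final clamp raises the result to 1.
|  */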
11778 | |
11779 | /* |
11780 | * Convert a cclock count to nanoseconds. No matter how slow
11781 | * the cclock, a non-zero cclocks will always have a non-zero result. |
11782 | */ |
11783 | u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks) |
11784 | { |
11785 | u32 ns; |
11786 | |
11787 | if (dd->icode == ICODE_FPGA_EMULATION) |
11788 | ns = (cclocks * FPGA_CCLOCK_PS) / 1000; |
11789 | else /* simulation pretends to be ASIC */ |
11790 | ns = (cclocks * ASIC_CCLOCK_PS) / 1000; |
11791 | if (cclocks && !ns) |
11792 | ns = 1; |
11793 | return ns; |
11794 | } |
11795 | |
11796 | /* |
11797 | * Dynamically adjust the receive interrupt timeout for a context based on |
11798 | * incoming packet rate. |
11799 | * |
11800 | * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero. |
11801 | */ |
11802 | static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts) |
11803 | { |
11804 | struct hfi1_devdata *dd = rcd->dd; |
11805 | u32 timeout = rcd->rcvavail_timeout; |
11806 | |
11807 | /* |
11808 | * This algorithm doubles or halves the timeout depending on whether |
11809 | * the number of packets received in this interrupt was less than,
11810 | * or greater than or equal to, the interrupt count.
11811 | * |
11812 | * The calculations below do not allow a steady state to be achieved. |
11813 | * Only at the endpoints is it possible to have an unchanging
11814 | * timeout. |
11815 | */ |
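| /*
|  * Example (hypothetical numbers): with rcv_intr_count = 16 and a
|  * current timeout of 840, an interrupt that saw 10 packets halves
|  * the timeout to 420, while one that saw 20 packets doubles it to
|  * 1680, capped at dd->rcv_intr_timeout_csr.
|  */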
11816 | if (npkts < rcv_intr_count) { |
11817 | /* |
11818 | * Not enough packets arrived before the timeout, adjust |
11819 | * timeout downward. |
11820 | */ |
11821 | if (timeout < 2) /* already at minimum? */ |
11822 | return; |
11823 | timeout >>= 1; |
11824 | } else { |
11825 | /* |
11826 | * More than enough packets arrived before the timeout, adjust |
11827 | * timeout upward. |
11828 | */ |
11829 | if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ |
11830 | return; |
11831 | timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); |
11832 | } |
11833 | |
11834 | rcd->rcvavail_timeout = timeout; |
11835 | /* |
11836 | * timeout cannot be larger than rcv_intr_timeout_csr which has already |
11837 | * been verified to be in range |
11838 | */ |
11839 | write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11840 | (u64)timeout <<
11841 | RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11842 | } |
11843 | |
11844 | void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, |
11845 | u32 intr_adjust, u32 npkts) |
11846 | { |
11847 | struct hfi1_devdata *dd = rcd->dd; |
11848 | u64 reg; |
11849 | u32 ctxt = rcd->ctxt; |
11850 | |
11851 | /* |
11852 | * Need to write timeout register before updating RcvHdrHead to ensure |
11853 | * that a new value is used when the HW decides to restart counting. |
11854 | */ |
11855 | if (intr_adjust) |
11856 | adjust_rcv_timeout(rcd, npkts); |
11857 | if (updegr) { |
11858 | reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK) |
11859 | << RCV_EGR_INDEX_HEAD_HEAD_SHIFT; |
11860 | write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11861 | } |
11862 | reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) | |
11863 | (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) |
11864 | << RCV_HDR_HEAD_HEAD_SHIFT); |
11865 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11866 | } |
11867 | |
11868 | u32 hdrqempty(struct hfi1_ctxtdata *rcd) |
11869 | { |
11870 | u32 head, tail; |
11871 | |
11872 | head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11873 | & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT; |
11874 | |
11875 | if (hfi1_rcvhdrtail_kvaddr(rcd)) |
11876 | tail = get_rcvhdrtail(rcd); |
11877 | else |
11878 | tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11879 | |
11880 | return head == tail; |
11881 | } |
11882 | |
11883 | /* |
11884 | * Context Control and Receive Array encoding for buffer size: |
11885 | * 0x0 invalid |
11886 | * 0x1 4 KB |
11887 | * 0x2 8 KB |
11888 | * 0x3 16 KB |
11889 | * 0x4 32 KB |
11890 | * 0x5 64 KB |
11891 | * 0x6 128 KB |
11892 | * 0x7 256 KB |
11893 | * 0x8 512 KB (Receive Array only) |
11894 | * 0x9 1 MB (Receive Array only) |
11895 | * 0xa 2 MB (Receive Array only) |
11896 | * |
11897 | * 0xB-0xF - reserved (Receive Array only) |
11898 | * |
11899 | * |
11900 | * This routine assumes that the value has already been sanity checked. |
11901 | */ |
11902 | static u32 encoded_size(u32 size) |
11903 | { |
11904 | switch (size) { |
11905 | case 4 * 1024: return 0x1; |
11906 | case 8 * 1024: return 0x2; |
11907 | case 16 * 1024: return 0x3; |
11908 | case 32 * 1024: return 0x4; |
11909 | case 64 * 1024: return 0x5; |
11910 | case 128 * 1024: return 0x6; |
11911 | case 256 * 1024: return 0x7; |
11912 | case 512 * 1024: return 0x8; |
11913 | case 1 * 1024 * 1024: return 0x9; |
11914 | case 2 * 1024 * 1024: return 0xa; |
11915 | } |
11916 | return 0x1; /* if invalid, go with the minimum size */ |
11917 | } |
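| /*
|  * Example: encoded_size(64 * 1024) returns 0x5, while any size not in
|  * the table falls back to 0x1 (the 4 KB encoding).
|  */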
11918 | |
11919 | /** |
11920 | * encode_rcv_header_entry_size - return chip specific encoding for size |
11921 | * @size: size in dwords |
11922 | * |
11923 | * Convert a receive header entry size to the encoding used in the CSR.
11924 | * |
11925 | * Return a zero if the given size is invalid, otherwise the encoding. |
11926 | */ |
11927 | u8 encode_rcv_header_entry_size(u8 size)
11928 | { |
11929 | /* there are only 3 valid receive header entry sizes */ |
11930 | if (size == 2) |
11931 | return 1; |
11932 | if (size == 16) |
11933 | return 2; |
11934 | if (size == 32) |
11935 | return 4; |
11936 | return 0; /* invalid */ |
11937 | } |
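      |  |
      | /* e.g. a 32-dword entry encodes as 4; see set_hdrq_regs() below */ |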
11938 | |
11939 | /** |
11940 | * hfi1_validate_rcvhdrcnt - validate hdrcnt |
11941 | * @dd: the device data |
11942 | * @thecnt: the header count |
11943 | */ |
11944 | int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) |
11945 | { |
11946 | if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { |
11947 | dd_dev_err(dd, "Receive header queue count too small\n"); |
11948 | return -EINVAL; |
11949 | } |
11950 | |
11951 | if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { |
11952 | dd_dev_err(dd, |
11953 | "Receive header queue count cannot be greater than %u\n" , |
11954 | HFI1_MAX_HDRQ_EGRBUF_CNT); |
11955 | return -EINVAL; |
11956 | } |
11957 | |
11958 | if (thecnt % HDRQ_INCREMENT) { |
11959 | dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n" , |
11960 | thecnt, HDRQ_INCREMENT); |
11961 | return -EINVAL; |
11962 | } |
11963 | |
11964 | return 0; |
11965 | } |
11966 | |
11967 | /** |
11968 | * set_hdrq_regs - set header queue registers for context |
11969 | * @dd: the device data |
11970 | * @ctxt: the context |
11971 | * @entsize: the dword entry size |
11972 | * @hdrcnt: the number of header entries |
11973 | */ |
11974 | void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) |
11975 | { |
11976 | u64 reg; |
11977 | |
11978 | reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) << |
11979 | RCV_HDR_CNT_CNT_SHIFT; |
11980 | write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, value: reg); |
11981 | reg = ((u64)encode_rcv_header_entry_size(size: entsize) & |
11982 | RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) << |
11983 | RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; |
11984 | write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, value: reg); |
11985 | reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) << |
11986 | RCV_HDR_SIZE_HDR_SIZE_SHIFT; |
11987 | write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, value: reg); |
11988 | |
11989 | /* |
11990 | * Program dummy tail address for every receive context |
11991 | * before enabling any receive context |
11992 | */ |
11993 | write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
11994 | value: dd->rcvhdrtail_dummy_dma); |
11995 | } |
11996 | |
11997 | void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, |
11998 | struct hfi1_ctxtdata *rcd) |
11999 | { |
12000 | u64 rcvctrl, reg; |
12001 | int did_enable = 0; |
12002 | u16 ctxt; |
12003 | |
12004 | if (!rcd) |
12005 | return; |
12006 | |
12007 | ctxt = rcd->ctxt; |
12008 | |
12009 | hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); |
12010 | |
12011 | rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); |
12012 | /* if the context is already enabled, don't do the extra steps */ |
12013 | if ((op & HFI1_RCVCTRL_CTXT_ENB) && |
12014 | !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { |
12015 | /* reset the tail and hdr addresses, and sequence count */ |
12016 | write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, |
12017 | rcd->rcvhdrq_dma); |
12018 | if (hfi1_rcvhdrtail_kvaddr(rcd)) |
12019 | write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
12020 | rcd->rcvhdrqtailaddr_dma); |
12021 | hfi1_set_seq_cnt(rcd, 1); |
12022 |  |
12023 | /* reset the cached receive header queue head value */ |
12024 | hfi1_set_rcd_head(rcd, 0); |
12025 | |
12026 | /* |
12027 | * Zero the receive header queue so we don't get false |
12028 | * positives when checking the sequence number. The |
12029 | * sequence numbers could land exactly on the same spot. |
12030 | * E.g. a rcd restart before the receive header wrapped. |
12031 | */ |
12032 | memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); |
12033 | |
12034 | /* starting timeout */ |
12035 | rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; |
12036 | |
12037 | /* enable the context */ |
12038 | rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK; |
12039 | |
12040 | /* clean the egr buffer size first */ |
12041 | rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; |
12042 | rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) |
12043 | & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK) |
12044 | << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT; |
12045 | |
12046 | /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ |
12047 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); |
12048 | did_enable = 1; |
12049 |  |
12050 | /* zero RcvEgrIndexHead */ |
12051 | write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); |
12052 | |
12053 | /* set eager count and base index */ |
12054 | reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) |
12055 | & RCV_EGR_CTRL_EGR_CNT_MASK) |
12056 | << RCV_EGR_CTRL_EGR_CNT_SHIFT) | |
12057 | (((rcd->eager_base >> RCV_SHIFT) |
12058 | & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK) |
12059 | << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT); |
12060 | write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); |
12061 | |
12062 | /* |
12063 | * Set TID (expected) count and base index. |
12064 | * rcd->expected_count is set to individual RcvArray entries, |
12065 | * not pairs, and the CSR takes a pair-count in groups of |
12066 | * four, so divide by 8. |
12067 | */ |
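      | /* |
      |  * Illustrative numbers: an expected_count of 2048 entries is 1024 |
      |  * pairs, or 256 groups of four pairs -- i.e. 2048 >> 3, assuming |
      |  * RCV_SHIFT is 3 per the divide-by-8 note above. |
      |  */ |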
12068 | reg = (((rcd->expected_count >> RCV_SHIFT) |
12069 | & RCV_TID_CTRL_TID_PAIR_CNT_MASK) |
12070 | << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) | |
12071 | (((rcd->expected_base >> RCV_SHIFT) |
12072 | & RCV_TID_CTRL_TID_BASE_INDEX_MASK) |
12073 | << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT); |
12074 | write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); |
12075 | if (ctxt == HFI1_CTRL_CTXT) |
12076 | write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); |
12077 | } |
12078 | if (op & HFI1_RCVCTRL_CTXT_DIS) { |
12079 | write_csr(dd, RCV_VL15, 0); |
12080 | /* |
12081 | * When receive context is being disabled turn on tail |
12082 | * update with a dummy tail address and then disable |
12083 | * receive context. |
12084 | */ |
12085 | if (dd->rcvhdrtail_dummy_dma) { |
12086 | write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
12087 | dd->rcvhdrtail_dummy_dma); |
12088 | /* Enabling RcvCtxtCtrl.TailUpd is intentional. */ |
12089 | rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; |
12090 | } |
12091 | |
12092 | rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK; |
12093 | } |
12094 | if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) { |
12095 | set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, |
12096 | IS_RCVAVAIL_START + rcd->ctxt, true); |
12097 | rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; |
12098 | } |
12099 | if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) { |
12100 | set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, |
12101 | IS_RCVAVAIL_START + rcd->ctxt, false); |
12102 | rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; |
12103 | } |
12104 | if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd)) |
12105 | rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; |
12106 | if (op & HFI1_RCVCTRL_TAILUPD_DIS) { |
12107 | /* See comment on RcvCtxtCtrl.TailUpd above */ |
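      | /* |
      |  * When CTXT_DIS is also set, TailUpd was just enabled above with |
      |  * the dummy tail address, so it must remain set here. |
      |  */ |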
12108 | if (!(op & HFI1_RCVCTRL_CTXT_DIS)) |
12109 | rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; |
12110 | } |
12111 | if (op & HFI1_RCVCTRL_TIDFLOW_ENB) |
12112 | rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; |
12113 | if (op & HFI1_RCVCTRL_TIDFLOW_DIS) |
12114 | rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; |
12115 | if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) { |
12116 | /* |
12117 | * In one-packet-per-eager mode, the size comes from |
12118 | * the RcvArray entry. |
12119 | */ |
12120 | rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; |
12121 | rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; |
12122 | } |
12123 | if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS) |
12124 | rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; |
12125 | if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB) |
12126 | rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; |
12127 | if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS) |
12128 | rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; |
12129 | if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB) |
12130 | rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; |
12131 | if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS) |
12132 | rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; |
12133 | if (op & HFI1_RCVCTRL_URGENT_ENB) |
12134 | set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, |
12135 | IS_RCVURGENT_START + rcd->ctxt, true); |
12136 | if (op & HFI1_RCVCTRL_URGENT_DIS) |
12137 | set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, |
12138 | IS_RCVURGENT_START + rcd->ctxt, false); |
12139 | |
12140 | hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl); |
12141 | write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); |
12142 | |
12143 | /* work around sticky RcvCtxtStatus.BlockedRHQFull */ |
12144 | if (did_enable && |
12145 | (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { |
12146 | reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); |
12147 | if (reg != 0) { |
12148 | dd_dev_info(dd, "ctxt %d status %lld (blocked)\n" , |
12149 | ctxt, reg); |
12150 | read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); |
12151 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, value: 0x10); |
12152 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, value: 0x00); |
12153 | read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); |
12154 | reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); |
12155 | dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n" , |
12156 | ctxt, reg, reg == 0 ? "not" : "still" ); |
12157 | } |
12158 | } |
12159 | |
12160 | if (did_enable) { |
12161 | /* |
12162 | * The interrupt timeout and count must be set after |
12163 | * the context is enabled to take effect. |
12164 | */ |
12165 | /* set interrupt timeout */ |
12166 | write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, |
12167 | (u64)rcd->rcvavail_timeout << |
12168 | RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); |
12169 |  |
12170 | /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */ |
12171 | reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT; |
12172 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); |
12173 | } |
12174 | |
12175 | if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS)) |
12176 | /* |
12177 | * If the context has been disabled and the Tail Update has |
12178 | * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy |
12179 | * address so it does not point at invalid memory. |
12180 | */ |
12181 | write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
12182 | dd->rcvhdrtail_dummy_dma); |
12183 | } |
12184 | |
12185 | u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) |
12186 | { |
12187 | int ret; |
12188 | u64 val = 0; |
12189 | |
12190 | if (namep) { |
12191 | ret = dd->cntrnameslen; |
12192 | *namep = dd->cntrnames; |
12193 | } else { |
12194 | const struct cntr_entry *entry; |
12195 | int i, j; |
12196 | |
12197 | ret = (dd->ndevcntrs) * sizeof(u64); |
12198 | |
12199 | /* Get the start of the block of counters */ |
12200 | *cntrp = dd->cntrs; |
12201 | |
12202 | /* |
12203 | * Now go and fill in each counter in the block. |
12204 | */ |
12205 | for (i = 0; i < DEV_CNTR_LAST; i++) { |
12206 | entry = &dev_cntrs[i]; |
12207 | hfi1_cdbg(CNTR, "reading %s" , entry->name); |
12208 | if (entry->flags & CNTR_DISABLED) { |
12209 | /* Nothing */ |
12210 | hfi1_cdbg(CNTR, "\tDisabled" ); |
12211 | } else { |
12212 | if (entry->flags & CNTR_VL) { |
12213 | hfi1_cdbg(CNTR, "\tPer VL" ); |
12214 | for (j = 0; j < C_VL_COUNT; j++) { |
12215 | val = entry->rw_cntr(entry, |
12216 | dd, j, |
12217 | CNTR_MODE_R, |
12218 | 0); |
12219 | hfi1_cdbg( |
12220 | CNTR, |
12221 | "\t\tRead 0x%llx for %d" , |
12222 | val, j); |
12223 | dd->cntrs[entry->offset + j] = |
12224 | val; |
12225 | } |
12226 | } else if (entry->flags & CNTR_SDMA) { |
12227 | hfi1_cdbg(CNTR, |
12228 | "\t Per SDMA Engine" ); |
12229 | for (j = 0; j < chip_sdma_engines(dd); |
12230 | j++) { |
12231 | val = |
12232 | entry->rw_cntr(entry, dd, j, |
12233 | CNTR_MODE_R, 0); |
12234 | hfi1_cdbg(CNTR, |
12235 | "\t\tRead 0x%llx for %d" , |
12236 | val, j); |
12237 | dd->cntrs[entry->offset + j] = |
12238 | val; |
12239 | } |
12240 | } else { |
12241 | val = entry->rw_cntr(entry, dd, |
12242 | CNTR_INVALID_VL, |
12243 | CNTR_MODE_R, 0); |
12244 | dd->cntrs[entry->offset] = val; |
12245 | hfi1_cdbg(CNTR, "\tRead 0x%llx" , val); |
12246 | } |
12247 | } |
12248 | } |
12249 | } |
12250 | return ret; |
12251 | } |
12252 | |
12253 | /* |
12254 | * Used by sysfs to create files for hfi stats to read |
12255 | */ |
12256 | u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) |
12257 | { |
12258 | int ret; |
12259 | u64 val = 0; |
12260 | |
12261 | if (namep) { |
12262 | ret = ppd->dd->portcntrnameslen; |
12263 | *namep = ppd->dd->portcntrnames; |
12264 | } else { |
12265 | const struct cntr_entry *entry; |
12266 | int i, j; |
12267 | |
12268 | ret = ppd->dd->nportcntrs * sizeof(u64); |
12269 | *cntrp = ppd->cntrs; |
12270 | |
12271 | for (i = 0; i < PORT_CNTR_LAST; i++) { |
12272 | entry = &port_cntrs[i]; |
12273 | hfi1_cdbg(CNTR, "reading %s", entry->name); |
12274 | if (entry->flags & CNTR_DISABLED) { |
12275 | /* Nothing */ |
12276 | hfi1_cdbg(CNTR, "\tDisabled"); |
12277 | continue; |
12278 | } |
12279 |  |
12280 | if (entry->flags & CNTR_VL) { |
12281 | hfi1_cdbg(CNTR, "\tPer VL"); |
12282 | for (j = 0; j < C_VL_COUNT; j++) { |
12283 | val = entry->rw_cntr(entry, ppd, j, |
12284 | CNTR_MODE_R, |
12285 | 0); |
12286 | hfi1_cdbg( |
12287 | CNTR, |
12288 | "\t\tRead 0x%llx for %d" , |
12289 | val, j); |
12290 | ppd->cntrs[entry->offset + j] = val; |
12291 | } |
12292 | } else { |
12293 | val = entry->rw_cntr(entry, ppd, |
12294 | CNTR_INVALID_VL, |
12295 | CNTR_MODE_R, |
12296 | 0); |
12297 | ppd->cntrs[entry->offset] = val; |
12298 | hfi1_cdbg(CNTR, "\tRead 0x%llx" , val); |
12299 | } |
12300 | } |
12301 | } |
12302 | return ret; |
12303 | } |
12304 | |
12305 | static void free_cntrs(struct hfi1_devdata *dd) |
12306 | { |
12307 | struct hfi1_pportdata *ppd; |
12308 | int i; |
12309 | |
12310 | if (dd->synth_stats_timer.function) |
12311 | del_timer_sync(&dd->synth_stats_timer); |
12312 | cancel_work_sync(&dd->update_cntr_work); |
12313 | ppd = (struct hfi1_pportdata *)(dd + 1); |
12314 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
12315 | kfree(ppd->cntrs); |
12316 | kfree(ppd->scntrs); |
12317 | free_percpu(ppd->ibport_data.rvp.rc_acks); |
12318 | free_percpu(ppd->ibport_data.rvp.rc_qacks); |
12319 | free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); |
12320 | ppd->cntrs = NULL; |
12321 | ppd->scntrs = NULL; |
12322 | ppd->ibport_data.rvp.rc_acks = NULL; |
12323 | ppd->ibport_data.rvp.rc_qacks = NULL; |
12324 | ppd->ibport_data.rvp.rc_delayed_comp = NULL; |
12325 | } |
12326 | kfree(dd->portcntrnames); |
12327 | dd->portcntrnames = NULL; |
12328 | kfree(dd->cntrs); |
12329 | dd->cntrs = NULL; |
12330 | kfree(dd->scntrs); |
12331 | dd->scntrs = NULL; |
12332 | kfree(dd->cntrnames); |
12333 | dd->cntrnames = NULL; |
12334 | if (dd->update_cntr_wq) { |
12335 | destroy_workqueue(dd->update_cntr_wq); |
12336 | dd->update_cntr_wq = NULL; |
12337 | } |
12338 | } |
12339 | |
12340 | static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry, |
12341 | u64 *psval, void *context, int vl) |
12342 | { |
12343 | u64 val; |
12344 | u64 sval = *psval; |
12345 | |
12346 | if (entry->flags & CNTR_DISABLED) { |
12347 | dd_dev_err(dd, "Counter %s not enabled" , entry->name); |
12348 | return 0; |
12349 | } |
12350 | |
12351 | hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx" , entry->name, vl, *psval); |
12352 | |
12353 | val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); |
12354 | |
12355 | /* If its a synthetic counter there is more work we need to do */ |
12356 | if (entry->flags & CNTR_SYNTH) { |
12357 | if (sval == CNTR_MAX) { |
12358 | /* No need to read already saturated */ |
12359 | return CNTR_MAX; |
12360 | } |
12361 | |
12362 | if (entry->flags & CNTR_32BIT) { |
12363 | /* 32bit counters can wrap multiple times */ |
12364 | u64 upper = sval >> 32; |
12365 | u64 lower = (sval << 32) >> 32; |
12366 | |
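      | /* |
      |  * Illustrative numbers: sval = 0x1fffffff0 gives upper = 0x1 |
      |  * and lower = 0xfffffff0; a hw read of 0x10 is below lower, |
      |  * so the hw wrapped again and the result is 0x200000010. |
      |  */ |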
12367 | if (lower > val) { /* hw wrapped */ |
12368 | if (upper == CNTR_32BIT_MAX) |
12369 | val = CNTR_MAX; |
12370 | else |
12371 | upper++; |
12372 | } |
12373 | |
12374 | if (val != CNTR_MAX) |
12375 | val = (upper << 32) | val; |
12376 | |
12377 | } else { |
12378 | /* If we rolled we are saturated */ |
12379 | if ((val < sval) || (val > CNTR_MAX)) |
12380 | val = CNTR_MAX; |
12381 | } |
12382 | } |
12383 | |
12384 | *psval = val; |
12385 | |
12386 | hfi1_cdbg(CNTR, "\tNew val=0x%llx" , val); |
12387 | |
12388 | return val; |
12389 | } |
12390 | |
12391 | static u64 write_dev_port_cntr(struct hfi1_devdata *dd, |
12392 | struct cntr_entry *entry, |
12393 | u64 *psval, void *context, int vl, u64 data) |
12394 | { |
12395 | u64 val; |
12396 | |
12397 | if (entry->flags & CNTR_DISABLED) { |
12398 | dd_dev_err(dd, "Counter %s not enabled", entry->name); |
12399 | return 0; |
12400 | } |
12401 |  |
12402 | hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); |
12403 | |
12404 | if (entry->flags & CNTR_SYNTH) { |
12405 | *psval = data; |
12406 | if (entry->flags & CNTR_32BIT) { |
12407 | val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, |
12408 | (data << 32) >> 32); |
12409 | val = data; /* return the full 64bit value */ |
12410 | } else { |
12411 | val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, |
12412 | data); |
12413 | } |
12414 | } else { |
12415 | val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); |
12416 | } |
12417 | |
12418 | *psval = val; |
12419 | |
12420 | hfi1_cdbg(CNTR, "\tNew val=0x%llx" , val); |
12421 | |
12422 | return val; |
12423 | } |
12424 | |
12425 | u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) |
12426 | { |
12427 | struct cntr_entry *entry; |
12428 | u64 *sval; |
12429 | |
12430 | entry = &dev_cntrs[index]; |
12431 | sval = dd->scntrs + entry->offset; |
12432 | |
12433 | if (vl != CNTR_INVALID_VL) |
12434 | sval += vl; |
12435 | |
12436 | return read_dev_port_cntr(dd, entry, sval, dd, vl); |
12437 | } |
12438 | |
12439 | u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) |
12440 | { |
12441 | struct cntr_entry *entry; |
12442 | u64 *sval; |
12443 | |
12444 | entry = &dev_cntrs[index]; |
12445 | sval = dd->scntrs + entry->offset; |
12446 | |
12447 | if (vl != CNTR_INVALID_VL) |
12448 | sval += vl; |
12449 | |
12450 | return write_dev_port_cntr(dd, entry, sval, dd, vl, data); |
12451 | } |
12452 | |
12453 | u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl) |
12454 | { |
12455 | struct cntr_entry *entry; |
12456 | u64 *sval; |
12457 | |
12458 | entry = &port_cntrs[index]; |
12459 | sval = ppd->scntrs + entry->offset; |
12460 | |
12461 | if (vl != CNTR_INVALID_VL) |
12462 | sval += vl; |
12463 | |
12464 | if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && |
12465 | (index <= C_RCV_HDR_OVF_LAST)) { |
12466 | /* We do not want to bother for disabled contexts */ |
12467 | return 0; |
12468 | } |
12469 | |
12470 | return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); |
12471 | } |
12472 | |
12473 | u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data) |
12474 | { |
12475 | struct cntr_entry *entry; |
12476 | u64 *sval; |
12477 | |
12478 | entry = &port_cntrs[index]; |
12479 | sval = ppd->scntrs + entry->offset; |
12480 | |
12481 | if (vl != CNTR_INVALID_VL) |
12482 | sval += vl; |
12483 | |
12484 | if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && |
12485 | (index <= C_RCV_HDR_OVF_LAST)) { |
12486 | /* We do not want to bother for disabled contexts */ |
12487 | return 0; |
12488 | } |
12489 | |
12490 | return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); |
12491 | } |
12492 | |
12493 | static void do_update_synth_timer(struct work_struct *work) |
12494 | { |
12495 | u64 cur_tx; |
12496 | u64 cur_rx; |
12497 | u64 total_flits; |
12498 | u8 update = 0; |
12499 | int i, j, vl; |
12500 | struct hfi1_pportdata *ppd; |
12501 | struct cntr_entry *entry; |
12502 | struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, |
12503 | update_cntr_work); |
12504 | |
12505 | /* |
12506 | * Rather than keep beating on the CSRs, pick a minimal set that we can |
12507 | * check to watch for potential rollover. We can do this by looking at |
12508 | * the number of flits sent/received. If the total exceeds the 32-bit |
12509 | * range then we have to iterate all the counters and update them. |
12510 | */ |
12511 | entry = &dev_cntrs[C_DC_RCV_FLITS]; |
12512 | cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); |
12513 | |
12514 | entry = &dev_cntrs[C_DC_XMIT_FLITS]; |
12515 | cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); |
12516 | |
12517 | hfi1_cdbg( |
12518 | CNTR, |
12519 | "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx" , |
12520 | dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); |
12521 | |
12522 | if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { |
12523 | /* |
12524 | * May not be strictly necessary to update but it won't hurt and |
12525 | * simplifies the logic here. |
12526 | */ |
12527 | update = 1; |
12528 | hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating" , |
12529 | dd->unit); |
12530 | } else { |
12531 | total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); |
12532 | hfi1_cdbg(CNTR, |
12533 | "[%d] total flits 0x%llx limit 0x%llx" , dd->unit, |
12534 | total_flits, (u64)CNTR_32BIT_MAX); |
12535 | if (total_flits >= CNTR_32BIT_MAX) { |
12536 | hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating" , |
12537 | dd->unit); |
12538 | update = 1; |
12539 | } |
12540 | } |
12541 | |
12542 | if (update) { |
12543 | hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters" , dd->unit); |
12544 | for (i = 0; i < DEV_CNTR_LAST; i++) { |
12545 | entry = &dev_cntrs[i]; |
12546 | if (entry->flags & CNTR_VL) { |
12547 | for (vl = 0; vl < C_VL_COUNT; vl++) |
12548 | read_dev_cntr(dd, i, vl); |
12549 | } else { |
12550 | read_dev_cntr(dd, i, CNTR_INVALID_VL); |
12551 | } |
12552 | } |
12553 | ppd = (struct hfi1_pportdata *)(dd + 1); |
12554 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
12555 | for (j = 0; j < PORT_CNTR_LAST; j++) { |
12556 | entry = &port_cntrs[j]; |
12557 | if (entry->flags & CNTR_VL) { |
12558 | for (vl = 0; vl < C_VL_COUNT; vl++) |
12559 | read_port_cntr(ppd, j, vl); |
12560 | } else { |
12561 | read_port_cntr(ppd, j, CNTR_INVALID_VL); |
12562 | } |
12563 | } |
12564 | } |
12565 | |
12566 | /* |
12567 | * We want the value in the register. The goal is to keep track |
12568 | * of the number of "ticks" not the counter value. In other |
12569 | * words if the register rolls we want to notice it and go ahead |
12570 | * and force an update. |
12571 | */ |
12572 | entry = &dev_cntrs[C_DC_XMIT_FLITS]; |
12573 | dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, |
12574 | CNTR_MODE_R, 0); |
12575 | |
12576 | entry = &dev_cntrs[C_DC_RCV_FLITS]; |
12577 | dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, |
12578 | CNTR_MODE_R, 0); |
12579 | |
12580 | hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx" , |
12581 | dd->unit, dd->last_tx, dd->last_rx); |
12582 | |
12583 | } else { |
12584 | hfi1_cdbg(CNTR, "[%d] No update necessary" , dd->unit); |
12585 | } |
12586 | } |
12587 | |
12588 | static void update_synth_timer(struct timer_list *t) |
12589 | { |
12590 | struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); |
12591 | |
12592 | queue_work(dd->update_cntr_wq, &dd->update_cntr_work); |
12593 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
12594 | } |
12595 | |
12596 | #define C_MAX_NAME 16 /* 15 chars + one for \0 */ |
12597 | static int init_cntrs(struct hfi1_devdata *dd) |
12598 | { |
12599 | int i, rcv_ctxts, j; |
12600 | size_t sz; |
12601 | char *p; |
12602 | char name[C_MAX_NAME]; |
12603 | struct hfi1_pportdata *ppd; |
12604 | const char *bit_type_32 = ",32"; |
12605 | const int bit_type_32_sz = strlen(bit_type_32); |
12606 | u32 sdma_engines = chip_sdma_engines(dd); |
12607 | |
12608 | /* set up the stats timer; the add_timer is done at the end */ |
12609 | timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); |
12610 | |
12611 | /***********************/ |
12612 | /* per device counters */ |
12613 | /***********************/ |
12614 | |
12615 | /* size names and determine how many we have */ |
12616 | dd->ndevcntrs = 0; |
12617 | sz = 0; |
12618 | |
12619 | for (i = 0; i < DEV_CNTR_LAST; i++) { |
12620 | if (dev_cntrs[i].flags & CNTR_DISABLED) { |
12621 | hfi1_dbg_early("\tSkipping %s\n" , dev_cntrs[i].name); |
12622 | continue; |
12623 | } |
12624 | |
12625 | if (dev_cntrs[i].flags & CNTR_VL) { |
12626 | dev_cntrs[i].offset = dd->ndevcntrs; |
12627 | for (j = 0; j < C_VL_COUNT; j++) { |
12628 | snprintf(name, C_MAX_NAME, "%s%d", |
12629 | dev_cntrs[i].name, vl_from_idx(j)); |
12630 | sz += strlen(name); |
12631 | /* Add ",32" for 32-bit counters */ |
12632 | if (dev_cntrs[i].flags & CNTR_32BIT) |
12633 | sz += bit_type_32_sz; |
12634 | sz++; |
12635 | dd->ndevcntrs++; |
12636 | } |
12637 | } else if (dev_cntrs[i].flags & CNTR_SDMA) { |
12638 | dev_cntrs[i].offset = dd->ndevcntrs; |
12639 | for (j = 0; j < sdma_engines; j++) { |
12640 | snprintf(name, C_MAX_NAME, "%s%d", |
12641 | dev_cntrs[i].name, j); |
12642 | sz += strlen(name); |
12643 | /* Add ",32" for 32-bit counters */ |
12644 | if (dev_cntrs[i].flags & CNTR_32BIT) |
12645 | sz += bit_type_32_sz; |
12646 | sz++; |
12647 | dd->ndevcntrs++; |
12648 | } |
12649 | } else { |
12650 | /* +1 for newline. */ |
12651 | sz += strlen(dev_cntrs[i].name) + 1; |
12652 | /* Add ",32" for 32-bit counters */ |
12653 | if (dev_cntrs[i].flags & CNTR_32BIT) |
12654 | sz += bit_type_32_sz; |
12655 | dev_cntrs[i].offset = dd->ndevcntrs; |
12656 | dd->ndevcntrs++; |
12657 | } |
12658 | } |
12659 | |
12660 | /* allocate space for the counter values */ |
12661 | dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), |
12662 | GFP_KERNEL); |
12663 | if (!dd->cntrs) |
12664 | goto bail; |
12665 |  |
12666 | dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); |
12667 | if (!dd->scntrs) |
12668 | goto bail; |
12669 |  |
12670 | /* allocate space for the counter names */ |
12671 | dd->cntrnameslen = sz; |
12672 | dd->cntrnames = kmalloc(sz, GFP_KERNEL); |
12673 | if (!dd->cntrnames) |
12674 | goto bail; |
12675 | |
12676 | /* fill in the names */ |
12677 | for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { |
12678 | if (dev_cntrs[i].flags & CNTR_DISABLED) { |
12679 | /* Nothing */ |
12680 | } else if (dev_cntrs[i].flags & CNTR_VL) { |
12681 | for (j = 0; j < C_VL_COUNT; j++) { |
12682 | snprintf(name, C_MAX_NAME, "%s%d", |
12683 | dev_cntrs[i].name, |
12684 | vl_from_idx(j)); |
12685 | memcpy(p, name, strlen(name)); |
12686 | p += strlen(name); |
12687 | |
12688 | /* Counter is 32 bits */ |
12689 | if (dev_cntrs[i].flags & CNTR_32BIT) { |
12690 | memcpy(p, bit_type_32, bit_type_32_sz); |
12691 | p += bit_type_32_sz; |
12692 | } |
12693 | |
12694 | *p++ = '\n'; |
12695 | } |
12696 | } else if (dev_cntrs[i].flags & CNTR_SDMA) { |
12697 | for (j = 0; j < sdma_engines; j++) { |
12698 | snprintf(name, C_MAX_NAME, "%s%d", |
12699 | dev_cntrs[i].name, j); |
12700 | memcpy(p, name, strlen(name)); |
12701 | p += strlen(name); |
12702 | |
12703 | /* Counter is 32 bits */ |
12704 | if (dev_cntrs[i].flags & CNTR_32BIT) { |
12705 | memcpy(p, bit_type_32, bit_type_32_sz); |
12706 | p += bit_type_32_sz; |
12707 | } |
12708 | |
12709 | *p++ = '\n'; |
12710 | } |
12711 | } else { |
12712 | memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); |
12713 | p += strlen(dev_cntrs[i].name); |
12714 | |
12715 | /* Counter is 32 bits */ |
12716 | if (dev_cntrs[i].flags & CNTR_32BIT) { |
12717 | memcpy(p, bit_type_32, bit_type_32_sz); |
12718 | p += bit_type_32_sz; |
12719 | } |
12720 | |
12721 | *p++ = '\n'; |
12722 | } |
12723 | } |
12724 | |
12725 | /*********************/ |
12726 | /* per port counters */ |
12727 | /*********************/ |
12728 | |
12729 | /* |
12730 | * Go through the counters for the overflows and disable the ones we |
12731 | * don't need. This varies based on platform so we need to do it |
12732 | * dynamically here. |
12733 | */ |
12734 | rcv_ctxts = dd->num_rcv_contexts; |
12735 | for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts; |
12736 | i <= C_RCV_HDR_OVF_LAST; i++) { |
12737 | port_cntrs[i].flags |= CNTR_DISABLED; |
12738 | } |
12739 | |
12740 | /* size port counter names and determine how many we have */ |
12741 | sz = 0; |
12742 | dd->nportcntrs = 0; |
12743 | for (i = 0; i < PORT_CNTR_LAST; i++) { |
12744 | if (port_cntrs[i].flags & CNTR_DISABLED) { |
12745 | hfi1_dbg_early("\tSkipping %s\n" , port_cntrs[i].name); |
12746 | continue; |
12747 | } |
12748 | |
12749 | if (port_cntrs[i].flags & CNTR_VL) { |
12750 | port_cntrs[i].offset = dd->nportcntrs; |
12751 | for (j = 0; j < C_VL_COUNT; j++) { |
12752 | snprintf(name, C_MAX_NAME, "%s%d", |
12753 | port_cntrs[i].name, vl_from_idx(j)); |
12754 | sz += strlen(name); |
12755 | /* Add ",32" for 32-bit counters */ |
12756 | if (port_cntrs[i].flags & CNTR_32BIT) |
12757 | sz += bit_type_32_sz; |
12758 | sz++; |
12759 | dd->nportcntrs++; |
12760 | } |
12761 | } else { |
12762 | /* +1 for newline */ |
12763 | sz += strlen(port_cntrs[i].name) + 1; |
12764 | /* Add ",32" for 32-bit counters */ |
12765 | if (port_cntrs[i].flags & CNTR_32BIT) |
12766 | sz += bit_type_32_sz; |
12767 | port_cntrs[i].offset = dd->nportcntrs; |
12768 | dd->nportcntrs++; |
12769 | } |
12770 | } |
12771 | |
12772 | /* allocate space for the counter names */ |
12773 | dd->portcntrnameslen = sz; |
12774 | dd->portcntrnames = kmalloc(sz, GFP_KERNEL); |
12775 | if (!dd->portcntrnames) |
12776 | goto bail; |
12777 | |
12778 | /* fill in port cntr names */ |
12779 | for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { |
12780 | if (port_cntrs[i].flags & CNTR_DISABLED) |
12781 | continue; |
12782 | |
12783 | if (port_cntrs[i].flags & CNTR_VL) { |
12784 | for (j = 0; j < C_VL_COUNT; j++) { |
12785 | snprintf(name, C_MAX_NAME, "%s%d", |
12786 | port_cntrs[i].name, vl_from_idx(j)); |
12787 | memcpy(p, name, strlen(name)); |
12788 | p += strlen(name); |
12789 | |
12790 | /* Counter is 32 bits */ |
12791 | if (port_cntrs[i].flags & CNTR_32BIT) { |
12792 | memcpy(p, bit_type_32, bit_type_32_sz); |
12793 | p += bit_type_32_sz; |
12794 | } |
12795 | |
12796 | *p++ = '\n'; |
12797 | } |
12798 | } else { |
12799 | memcpy(p, port_cntrs[i].name, |
12800 | strlen(port_cntrs[i].name)); |
12801 | p += strlen(port_cntrs[i].name); |
12802 | |
12803 | /* Counter is 32 bits */ |
12804 | if (port_cntrs[i].flags & CNTR_32BIT) { |
12805 | memcpy(p, bit_type_32, bit_type_32_sz); |
12806 | p += bit_type_32_sz; |
12807 | } |
12808 | |
12809 | *p++ = '\n'; |
12810 | } |
12811 | } |
12812 | |
12813 | /* allocate per port storage for counter values */ |
12814 | ppd = (struct hfi1_pportdata *)(dd + 1); |
12815 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
12816 | ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); |
12817 | if (!ppd->cntrs) |
12818 | goto bail; |
12819 |  |
12820 | ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); |
12821 | if (!ppd->scntrs) |
12822 | goto bail; |
12823 | } |
12824 | |
12825 | /* CPU counters need to be allocated and zeroed */ |
12826 | if (init_cpu_counters(dd)) |
12827 | goto bail; |
12828 | |
12829 | dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", |
12830 | WQ_MEM_RECLAIM, dd->unit); |
12831 | if (!dd->update_cntr_wq) |
12832 | goto bail; |
12833 |  |
12834 | INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); |
12835 |  |
12836 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
12837 | return 0; |
12838 | bail: |
12839 | free_cntrs(dd); |
12840 | return -ENOMEM; |
12841 | } |
12842 | |
12843 | static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) |
12844 | { |
12845 | switch (chip_lstate) { |
12846 | case LSTATE_DOWN: |
12847 | return IB_PORT_DOWN; |
12848 | case LSTATE_INIT: |
12849 | return IB_PORT_INIT; |
12850 | case LSTATE_ARMED: |
12851 | return IB_PORT_ARMED; |
12852 | case LSTATE_ACTIVE: |
12853 | return IB_PORT_ACTIVE; |
12854 | default: |
12855 | dd_dev_err(dd, |
12856 | "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n" , |
12857 | chip_lstate); |
12858 | return IB_PORT_DOWN; |
12859 | } |
12860 | } |
12861 | |
12862 | u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) |
12863 | { |
12864 | /* look at the HFI meta-states only */ |
12865 | switch (chip_pstate & 0xf0) { |
12866 | case PLS_DISABLED: |
12867 | return IB_PORTPHYSSTATE_DISABLED; |
12868 | case PLS_OFFLINE: |
12869 | return OPA_PORTPHYSSTATE_OFFLINE; |
12870 | case PLS_POLLING: |
12871 | return IB_PORTPHYSSTATE_POLLING; |
12872 | case PLS_CONFIGPHY: |
12873 | return IB_PORTPHYSSTATE_TRAINING; |
12874 | case PLS_LINKUP: |
12875 | return IB_PORTPHYSSTATE_LINKUP; |
12876 | case PLS_PHYTEST: |
12877 | return IB_PORTPHYSSTATE_PHY_TEST; |
12878 | default: |
12879 | dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n" , |
12880 | chip_pstate); |
12881 | return IB_PORTPHYSSTATE_DISABLED; |
12882 | } |
12883 | } |
12884 | |
12885 | /* return the OPA port logical state name */ |
12886 | const char *opa_lstate_name(u32 lstate) |
12887 | { |
12888 | static const char * const port_logical_names[] = { |
12889 | "PORT_NOP" , |
12890 | "PORT_DOWN" , |
12891 | "PORT_INIT" , |
12892 | "PORT_ARMED" , |
12893 | "PORT_ACTIVE" , |
12894 | "PORT_ACTIVE_DEFER" , |
12895 | }; |
12896 | if (lstate < ARRAY_SIZE(port_logical_names)) |
12897 | return port_logical_names[lstate]; |
12898 | return "unknown" ; |
12899 | } |
12900 | |
12901 | /* return the OPA port physical state name */ |
12902 | const char *opa_pstate_name(u32 pstate) |
12903 | { |
12904 | static const char * const port_physical_names[] = { |
12905 | "PHYS_NOP" , |
12906 | "reserved1" , |
12907 | "PHYS_POLL" , |
12908 | "PHYS_DISABLED" , |
12909 | "PHYS_TRAINING" , |
12910 | "PHYS_LINKUP" , |
12911 | "PHYS_LINK_ERR_RECOVER" , |
12912 | "PHYS_PHY_TEST" , |
12913 | "reserved8" , |
12914 | "PHYS_OFFLINE" , |
12915 | "PHYS_GANGED" , |
12916 | "PHYS_TEST" , |
12917 | }; |
12918 | if (pstate < ARRAY_SIZE(port_physical_names)) |
12919 | return port_physical_names[pstate]; |
12920 | return "unknown" ; |
12921 | } |
12922 | |
12923 | /** |
12924 | * update_statusp - Update userspace status flag |
12925 | * @ppd: Port data structure |
12926 | * @state: port state information |
12927 | * |
12928 | * Actual port status is determined by the host_link_state value |
12929 | * in the ppd. |
12930 | * |
12931 | * host_link_state MUST be updated before updating the user space |
12932 | * statusp. |
12933 | */ |
12934 | static void update_statusp(struct hfi1_pportdata *ppd, u32 state) |
12935 | { |
12936 | /* |
12937 | * Set port status flags in the page mapped into userspace |
12938 | * memory. Do it here to ensure a reliable state - this is |
12939 | * the only function called by all state handling code. |
12940 | * Always set the flags due to the fact that the cache value |
12941 | * might have been changed explicitly outside of this |
12942 | * function. |
12943 | */ |
12944 | if (ppd->statusp) { |
12945 | switch (state) { |
12946 | case IB_PORT_DOWN: |
12947 | case IB_PORT_INIT: |
12948 | *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | |
12949 | HFI1_STATUS_IB_READY); |
12950 | break; |
12951 | case IB_PORT_ARMED: |
12952 | *ppd->statusp |= HFI1_STATUS_IB_CONF; |
12953 | break; |
12954 | case IB_PORT_ACTIVE: |
12955 | *ppd->statusp |= HFI1_STATUS_IB_READY; |
12956 | break; |
12957 | } |
12958 | } |
12959 | dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n" , |
12960 | opa_lstate_name(state), state); |
12961 | } |
12962 | |
12963 | /** |
12964 | * wait_logical_linkstate - wait for an IB link state change to occur |
12965 | * @ppd: port device |
12966 | * @state: the state to wait for |
12967 | * @msecs: the number of milliseconds to wait |
12968 | * |
12969 | * Wait up to msecs milliseconds for IB link state change to occur. |
12970 | * For now, take the easy polling route. |
12971 | * Returns 0 if state reached, otherwise -ETIMEDOUT. |
12972 | */ |
12973 | static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, |
12974 | int msecs) |
12975 | { |
12976 | unsigned long timeout; |
12977 | u32 new_state; |
12978 | |
12979 | timeout = jiffies + msecs_to_jiffies(msecs); |
12980 | while (1) { |
12981 | new_state = chip_to_opa_lstate(ppd->dd, |
12982 | read_logical_state(ppd->dd)); |
12983 | if (new_state == state) |
12984 | break; |
12985 | if (time_after(jiffies, timeout)) { |
12986 | dd_dev_err(ppd->dd, |
12987 | "timeout waiting for link state 0x%x\n", |
12988 | state); |
12989 | return -ETIMEDOUT; |
12990 | } |
12991 | msleep(20); |
12992 | } |
12993 | |
12994 | return 0; |
12995 | } |
12996 | |
12997 | static void log_state_transition(struct hfi1_pportdata *ppd, u32 state) |
12998 | { |
12999 | u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); |
13000 |  |
13001 | dd_dev_info(ppd->dd, |
13002 | "physical state changed to %s (0x%x), phy 0x%x\n", |
13003 | opa_pstate_name(ib_pstate), ib_pstate, state); |
13004 | } |
13005 | |
13006 | /* |
13007 | * Read the physical hardware link state and check if it matches the |
13008 | * host driver's anticipated state. |
13009 | */ |
13010 | static void log_physical_state(struct hfi1_pportdata *ppd, u32 state) |
13011 | { |
13012 | u32 read_state = read_physical_state(ppd->dd); |
13013 |  |
13014 | if (read_state == state) { |
13015 | log_state_transition(ppd, state); |
13016 | } else { |
13017 | dd_dev_err(ppd->dd, |
13018 | "anticipated phy link state 0x%x, read 0x%x\n", |
13019 | state, read_state); |
13020 | } |
13021 | } |
13022 | |
13023 | /* |
13024 | * wait_physical_linkstate - wait for a physical link state change to occur |
13025 | * @ppd: port device |
13026 | * @state: the state to wait for |
13027 | * @msecs: the number of milliseconds to wait |
13028 | * |
13029 | * Wait up to msecs milliseconds for physical link state change to occur. |
13030 | * Returns 0 if state reached, otherwise -ETIMEDOUT. |
13031 | */ |
13032 | static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, |
13033 | int msecs) |
13034 | { |
13035 | u32 read_state; |
13036 | unsigned long timeout; |
13037 | |
13038 | timeout = jiffies + msecs_to_jiffies(msecs); |
13039 | while (1) { |
13040 | read_state = read_physical_state(ppd->dd); |
13041 | if (read_state == state) |
13042 | break; |
13043 | if (time_after(jiffies, timeout)) { |
13044 | dd_dev_err(ppd->dd, |
13045 | "timeout waiting for phy link state 0x%x\n", |
13046 | state); |
13047 | return -ETIMEDOUT; |
13048 | } |
13049 | usleep_range(1950, 2050); /* sleep 2ms-ish */ |
13050 | } |
13051 | |
13052 | log_state_transition(ppd, state); |
13053 | return 0; |
13054 | } |
13055 | |
13056 | /* |
13057 | * wait_phys_link_offline_substates - wait for any offline substate |
13058 | * @ppd: port device |
13059 | * @msecs: the number of milliseconds to wait |
13060 | * |
13061 | * Wait up to msecs milliseconds for any offline physical link |
13062 | * state change to occur. |
13063 | * Returns the read physical state on success, otherwise -ETIMEDOUT. |
13064 | */ |
13065 | static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, |
13066 | int msecs) |
13067 | { |
13068 | u32 read_state; |
13069 | unsigned long timeout; |
13070 | |
13071 | timeout = jiffies + msecs_to_jiffies(msecs); |
13072 | while (1) { |
13073 | read_state = read_physical_state(ppd->dd); |
13074 | if ((read_state & 0xF0) == PLS_OFFLINE) |
13075 | break; |
13076 | if (time_after(jiffies, timeout)) { |
13077 | dd_dev_err(ppd->dd, |
13078 | "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n", |
13079 | read_state, msecs); |
13080 | return -ETIMEDOUT; |
13081 | } |
13082 | usleep_range(1950, 2050); /* sleep 2ms-ish */ |
13083 | } |
13084 |  |
13085 | log_state_transition(ppd, read_state); |
13086 | return read_state; |
13087 | } |
13088 | |
13089 | /* |
13090 | * wait_phys_link_out_of_offline - wait for any out of offline state |
13091 | * @ppd: port device |
13092 | * @msecs: the number of milliseconds to wait |
13093 | * |
13094 | * Wait up to msecs milliseconds for any out of offline physical link |
13095 | * state change to occur. |
13096 | * Returns the read physical state on success, otherwise -ETIMEDOUT. |
13097 | */ |
13098 | static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, |
13099 | int msecs) |
13100 | { |
13101 | u32 read_state; |
13102 | unsigned long timeout; |
13103 | |
13104 | timeout = jiffies + msecs_to_jiffies(msecs); |
13105 | while (1) { |
13106 | read_state = read_physical_state(ppd->dd); |
13107 | if ((read_state & 0xF0) != PLS_OFFLINE) |
13108 | break; |
13109 | if (time_after(jiffies, timeout)) { |
13110 | dd_dev_err(ppd->dd, |
13111 | "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n", |
13112 | read_state, msecs); |
13113 | return -ETIMEDOUT; |
13114 | } |
13115 | usleep_range(1950, 2050); /* sleep 2ms-ish */ |
13116 | } |
13117 |  |
13118 | log_state_transition(ppd, read_state); |
13119 | return read_state; |
13120 | } |
13121 | |
13122 | #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ |
13123 | (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) |
13124 | |
13125 | #define SET_STATIC_RATE_CONTROL_SMASK(r) \ |
13126 | (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) |
13127 | |
13128 | void hfi1_init_ctxt(struct send_context *sc) |
13129 | { |
13130 | if (sc) { |
13131 | struct hfi1_devdata *dd = sc->dd; |
13132 | u64 reg; |
13133 | u8 set = (sc->type == SC_USER ? |
13134 | HFI1_CAP_IS_USET(STATIC_RATE_CTRL) : |
13135 | HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)); |
13136 | reg = read_kctxt_csr(dd, sc->hw_context, |
13137 | SEND_CTXT_CHECK_ENABLE); |
13138 | if (set) |
13139 | CLEAR_STATIC_RATE_CONTROL_SMASK(reg); |
13140 | else |
13141 | SET_STATIC_RATE_CONTROL_SMASK(reg); |
13142 | write_kctxt_csr(dd, sc->hw_context, |
13143 | SEND_CTXT_CHECK_ENABLE, reg); |
13144 | } |
13145 | } |
13146 | |
13147 | int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) |
13148 | { |
13149 | int ret = 0; |
13150 | u64 reg; |
13151 | |
13152 | if (dd->icode != ICODE_RTL_SILICON) { |
13153 | if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) |
13154 | dd_dev_info(dd, "%s: tempsense not supported by HW\n" , |
13155 | __func__); |
13156 | return -EINVAL; |
13157 | } |
13158 | reg = read_csr(dd, ASIC_STS_THERM); |
13159 | temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & |
13160 | ASIC_STS_THERM_CURR_TEMP_MASK); |
13161 | temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & |
13162 | ASIC_STS_THERM_LO_TEMP_MASK); |
13163 | temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & |
13164 | ASIC_STS_THERM_HI_TEMP_MASK); |
13165 | temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & |
13166 | ASIC_STS_THERM_CRIT_TEMP_MASK); |
13167 | /* triggers is a 3-bit value - 1 bit per trigger. */ |
13168 | temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); |
13169 | |
13170 | return ret; |
13171 | } |
13172 | |
13173 | /* ========================================================================= */ |
13174 | |
13175 | /** |
13176 | * read_mod_write() - Calculate the IRQ register index and set/clear the bits |
13177 | * @dd: valid devdata |
13178 | * @src: IRQ source to determine register index from |
13179 | * @bits: the bits to set or clear |
13180 | * @set: true == set the bits, false == clear the bits |
13181 | * |
13182 | */ |
13183 | static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, |
13184 | bool set) |
13185 | { |
13186 | u64 reg; |
13187 | u16 idx = src / BITS_PER_REGISTER; |
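      | /* e.g. src 70 selects CCE_INT_MASK CSR 1, assuming BITS_PER_REGISTER is 64 */ |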
13188 | unsigned long flags; |
13189 | |
13190 | spin_lock_irqsave(&dd->irq_src_lock, flags); |
13191 | reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); |
13192 | if (set) |
13193 | reg |= bits; |
13194 | else |
13195 | reg &= ~bits; |
13196 | write_csr(dd, CCE_INT_MASK + (8 * idx), reg); |
13197 | spin_unlock_irqrestore(&dd->irq_src_lock, flags); |
13198 | } |
13199 | |
13200 | /** |
13201 | * set_intr_bits() - Enable/disable a range (one or more) IRQ sources |
13202 | * @dd: valid devdata |
13203 | * @first: first IRQ source to set/clear |
13204 | * @last: last IRQ source (inclusive) to set/clear |
13205 | * @set: true == set the bits, false == clear the bits |
13206 | * |
13207 | * If first == last, set the exact source. |
13208 | */ |
13209 | int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) |
13210 | { |
13211 | u64 bits = 0; |
13212 | u64 bit; |
13213 | u16 src; |
13214 | |
13215 | if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES) |
13216 | return -EINVAL; |
13217 | |
13218 | if (last < first) |
13219 | return -ERANGE; |
13220 | |
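      | /* |
      |  * Illustrative walk-through, assuming BITS_PER_REGISTER is 64: for |
      |  * first = 62 and last = 66, sources 62-63 accumulate into bits, |
      |  * source 64 wraps (bit == 0) and flushes bits 62-63 to the first |
      |  * mask CSR, then the final read_mod_write() below flushes bits 0-2 |
      |  * of the second. |
      |  */ |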
13221 | for (src = first; src <= last; src++) { |
13222 | bit = src % BITS_PER_REGISTER; |
13223 | /* wrapped to next register? */ |
13224 | if (!bit && bits) { |
13225 | read_mod_write(dd, src - 1, bits, set); |
13226 | bits = 0; |
13227 | } |
13228 | bits |= BIT_ULL(bit); |
13229 | } |
13230 | read_mod_write(dd, last, bits, set); |
13231 | |
13232 | return 0; |
13233 | } |
13234 | |
13235 | /* |
13236 | * Clear all interrupt sources on the chip. |
13237 | */ |
13238 | void clear_all_interrupts(struct hfi1_devdata *dd) |
13239 | { |
13240 | int i; |
13241 | |
13242 | for (i = 0; i < CCE_NUM_INT_CSRS; i++) |
13243 | write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); |
13244 | |
13245 | write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); |
13246 | write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); |
13247 | write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); |
13248 | write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); |
13249 | write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); |
13250 | write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); |
13251 | write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); |
13252 | for (i = 0; i < chip_send_contexts(dd); i++) |
13253 | write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); |
13254 | for (i = 0; i < chip_sdma_engines(dd); i++) |
13255 | write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); |
13256 | |
13257 | write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); |
13258 | write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); |
13259 | write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); |
13260 | } |
13261 | |
13262 | /* |
13263 | * Remap the interrupt source from the general handler to the given MSI-X |
13264 | * interrupt. |
13265 | */ |
13266 | void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) |
13267 | { |
13268 | u64 reg; |
13269 | int m, n; |
13270 | |
13271 | /* clear from the handled mask of the general interrupt */ |
13272 | m = isrc / 64; |
13273 | n = isrc % 64; |
13274 | if (likely(m < CCE_NUM_INT_CSRS)) { |
13275 | dd->gi_mask[m] &= ~((u64)1 << n); |
13276 | } else { |
13277 | dd_dev_err(dd, "remap interrupt err\n" ); |
13278 | return; |
13279 | } |
13280 | |
13281 | /* direct the chip source to the given MSI-X interrupt */ |
13282 | m = isrc / 8; |
13283 | n = isrc % 8; |
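      | /* e.g. isrc 13 lands in CCE_INT_MAP CSR 1 (13 / 8), byte lane 5 */ |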
13284 | reg = read_csr(dd, CCE_INT_MAP + (8 * m)); |
13285 | reg &= ~((u64)0xff << (8 * n)); |
13286 | reg |= ((u64)msix_intr & 0xff) << (8 * n); |
13287 | write_csr(dd, CCE_INT_MAP + (8 * m), reg); |
13288 | } |
13289 | |
13290 | void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) |
13291 | { |
13292 | /* |
13293 | * SDMA engine interrupt sources are grouped by type, rather than |
13294 | * by engine. Per-engine interrupts are as follows: |
13295 | * SDMA |
13296 | * SDMAProgress |
13297 | * SDMAIdle |
13298 | */ |
13299 | remap_intr(dd, IS_SDMA_START + engine, msix_intr); |
13300 | remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); |
13301 | remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); |
13302 | } |
13303 | |
13304 | /* |
13305 | * Set the general handler to accept all interrupts, remap all |
13306 | * chip interrupts back to MSI-X 0. |
13307 | */ |
13308 | void reset_interrupts(struct hfi1_devdata *dd) |
13309 | { |
13310 | int i; |
13311 | |
13312 | /* all interrupts handled by the general handler */ |
13313 | for (i = 0; i < CCE_NUM_INT_CSRS; i++) |
13314 | dd->gi_mask[i] = ~(u64)0; |
13315 | |
13316 | /* all chip interrupts map to MSI-X 0 */ |
13317 | for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) |
13318 | write_csr(dd, CCE_INT_MAP + (8 * i), 0); |
13319 | } |
13320 | |
13321 | /** |
13322 | * set_up_interrupts() - Initialize the IRQ resources and state |
13323 | * @dd: valid devdata |
13324 | * |
13325 | */ |
13326 | static int set_up_interrupts(struct hfi1_devdata *dd) |
13327 | { |
13328 | int ret; |
13329 | |
13330 | /* mask all interrupts */ |
13331 | set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); |
13332 | |
13333 | /* clear all pending interrupts */ |
13334 | clear_all_interrupts(dd); |
13335 | |
13336 | /* reset general handler mask, chip MSI-X mappings */ |
13337 | reset_interrupts(dd); |
13338 | |
13339 | /* ask for MSI-X interrupts */ |
13340 | ret = msix_initialize(dd); |
13341 | if (ret) |
13342 | return ret; |
13343 | |
13344 | ret = msix_request_irqs(dd); |
13345 | if (ret) |
13346 | msix_clean_up_interrupts(dd); |
13347 | |
13348 | return ret; |
13349 | } |
13350 | |
13351 | /* |
13352 | * Set up context values in dd. Sets: |
13353 | * |
13354 | * num_rcv_contexts - number of contexts being used |
13355 | * n_krcv_queues - number of kernel contexts |
13356 | * first_dyn_alloc_ctxt - first dynamically allocated context |
13357 | * in array of contexts |
13358 | * freectxts - number of free user contexts |
13359 | * num_send_contexts - number of PIO send contexts being used |
13360 | * num_netdev_contexts - number of contexts reserved for netdev |
13361 | */ |
13362 | static int set_up_context_variables(struct hfi1_devdata *dd) |
13363 | { |
13364 | unsigned long num_kernel_contexts; |
13365 | u16 num_netdev_contexts; |
13366 | int ret; |
13367 | unsigned ngroups; |
13368 | int rmt_count; |
13369 | u32 n_usr_ctxts; |
13370 | u32 send_contexts = chip_send_contexts(dd); |
13371 | u32 rcv_contexts = chip_rcv_contexts(dd); |
13372 | |
13373 | /* |
13374 | * Kernel receive contexts: |
13375 | * - Context 0 - control context (VL15/multicast/error) |
13376 | * - Context 1 - first kernel context |
13377 | * - Context 2 - second kernel context |
13378 | * ... |
13379 | */ |
13380 | if (n_krcvqs) |
13381 | /* |
13382 | * n_krcvqs is the sum of module parameter kernel receive |
13383 | * contexts, krcvqs[]. It does not include the control |
13384 | * context, so add that. |
13385 | */ |
13386 | num_kernel_contexts = n_krcvqs + 1; |
13387 | else |
13388 | num_kernel_contexts = DEFAULT_KRCVQS + 1; |
13389 | /* |
13390 | * Every kernel receive context needs an ACK send context. |
13391 | * One send context is allocated for each VL{0-7} and VL15. |
13392 | */ |
13393 | if (num_kernel_contexts > (send_contexts - num_vls - 1)) { |
13394 | dd_dev_err(dd, |
13395 | "Reducing # kernel rcv contexts to: %d, from %lu\n" , |
13396 | send_contexts - num_vls - 1, |
13397 | num_kernel_contexts); |
13398 | num_kernel_contexts = send_contexts - num_vls - 1; |
13399 | } |
13400 | |
13401 | /* |
13402 | * User contexts: |
13403 | * - default to 1 user context per real (non-HT) CPU core if |
13404 | * num_user_contexts is negative |
13405 | */ |
13406 | if (num_user_contexts < 0) |
13407 | n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask); |
13408 | else |
13409 | n_usr_ctxts = num_user_contexts; |
13410 | /* |
13411 | * Adjust the counts given a global max. |
13412 | */ |
13413 | if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) { |
13414 | dd_dev_err(dd, |
13415 | "Reducing # user receive contexts to: %u, from %u\n" , |
13416 | (u32)(rcv_contexts - num_kernel_contexts), |
13417 | n_usr_ctxts); |
13418 | /* recalculate */ |
13419 | n_usr_ctxts = rcv_contexts - num_kernel_contexts; |
13420 | } |
13421 | |
13422 | num_netdev_contexts = |
13423 | hfi1_num_netdev_contexts(dd, rcv_contexts - |
13424 | (num_kernel_contexts + n_usr_ctxts), |
13425 | &node_affinity.real_cpu_mask); |
13426 | /* |
13427 | * RMT entries are allocated as follows: |
13428 | * 1. QOS (0 to 128 entries) |
13429 | * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + |
13430 | * num_netdev_contexts [b]) |
13431 | * 3. netdev (NUM_NETDEV_MAP_ENTRIES) |
13432 | * |
13433 | * Notes: |
13434 | * [a] Kernel contexts (except control) are included in FECN if kernel |
13435 | * TID_RDMA is active. |
13436 | * [b] Netdev and user contexts are randomly allocated from the same |
13437 | * context pool, so FECN must cover all contexts in the pool. |
13438 | */ |
13439 | rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) |
13440 | + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1 |
13441 | : 0) |
13442 | + n_usr_ctxts |
13443 | + num_netdev_contexts |
13444 | + NUM_NETDEV_MAP_ENTRIES; |
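      | /* |
      |  * Illustrative count, assuming TID_RDMA is off: 3 kernel contexts |
      |  * (control + 2), 16 user and 2 netdev contexts need |
      |  * qos_rmt_entries(2, ...) + 16 + 2 + NUM_NETDEV_MAP_ENTRIES |
      |  * RMT entries. |
      |  */ |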
13445 | if (rmt_count > NUM_MAP_ENTRIES) { |
13446 | int over = rmt_count - NUM_MAP_ENTRIES; |
13447 | /* try to squish user contexts, minimum of 1 */ |
13448 | if (over >= n_usr_ctxts) { |
13449 | dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n" ); |
13450 | return -EINVAL; |
13451 | } |
13452 | dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n" , |
13453 | n_usr_ctxts, n_usr_ctxts - over); |
13454 | n_usr_ctxts -= over; |
13455 | } |
13456 | |
13457 | /* the first N are kernel contexts, the rest are user/netdev contexts */ |
13458 | dd->num_rcv_contexts = |
13459 | num_kernel_contexts + n_usr_ctxts + num_netdev_contexts; |
13460 | dd->n_krcv_queues = num_kernel_contexts; |
13461 | dd->first_dyn_alloc_ctxt = num_kernel_contexts; |
13462 | dd->num_netdev_contexts = num_netdev_contexts; |
13463 | dd->num_user_contexts = n_usr_ctxts; |
13464 | dd->freectxts = n_usr_ctxts; |
13465 | dd_dev_info(dd, |
13466 | "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n" , |
13467 | rcv_contexts, |
13468 | (int)dd->num_rcv_contexts, |
13469 | (int)dd->n_krcv_queues, |
13470 | dd->num_netdev_contexts, |
13471 | dd->num_user_contexts); |
13472 | |
13473 | /* |
13474 | * Receive array allocation: |
13475 | * All RcvArray entries are divided into groups of 8. This |
13476 | * is required by the hardware and will speed up writes to |
13477 | * consecutive entries by using write-combining of the entire |
13478 | * cacheline. |
13479 | * |
13480 | * The groups are evenly divided among all contexts; any leftover |
13481 | * groups are given to the first N user contexts. |
13483 | */ |
13484 | dd->rcv_entries.group_size = RCV_INCREMENT; |
13485 | ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; |
13486 | dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; |
13487 | dd->rcv_entries.nctxt_extra = ngroups - |
13488 | (dd->num_rcv_contexts * dd->rcv_entries.ngroups); |
13489 | dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n" , |
13490 | dd->rcv_entries.ngroups, |
13491 | dd->rcv_entries.nctxt_extra); |
13492 | if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > |
13493 | MAX_EAGER_ENTRIES * 2) { |
13494 | dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / |
13495 | dd->rcv_entries.group_size; |
13496 | dd_dev_info(dd, |
13497 | "RcvArray group count too high, change to %u\n" , |
13498 | dd->rcv_entries.ngroups); |
13499 | dd->rcv_entries.nctxt_extra = 0; |
13500 | } |
13501 | /* |
13502 | * PIO send contexts |
13503 | */ |
13504 | ret = init_sc_pools_and_sizes(dd); |
13505 | if (ret >= 0) { /* success */ |
13506 | dd->num_send_contexts = ret; |
13507 | dd_dev_info( |
13508 | dd, |
13509 | "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n" , |
13510 | send_contexts, |
13511 | dd->num_send_contexts, |
13512 | dd->sc_sizes[SC_KERNEL].count, |
13513 | dd->sc_sizes[SC_ACK].count, |
13514 | dd->sc_sizes[SC_USER].count, |
13515 | dd->sc_sizes[SC_VL15].count); |
13516 | ret = 0; /* success */ |
13517 | } |
13518 | |
13519 | return ret; |
13520 | } |
13521 | |
13522 | /* |
13523 | * Set the device/port partition key table. The MAD code |
13524 | * will ensure that, at least, the partial management |
13525 | * partition key is present in the table. |
13526 | */ |
13527 | static void set_partition_keys(struct hfi1_pportdata *ppd) |
13528 | { |
13529 | struct hfi1_devdata *dd = ppd->dd; |
13530 | u64 reg = 0; |
13531 | int i; |
13532 | |
13533 | dd_dev_info(dd, "Setting partition keys\n" ); |
13534 | for (i = 0; i < hfi1_get_npkeys(dd); i++) { |
13535 | reg |= (ppd->pkeys[i] & |
13536 | RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) << |
13537 | ((i % 4) * |
13538 | RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT); |
13539 | /* Each register holds 4 PKey values. */ |
13540 | if ((i % 4) == 3) { |
13541 | write_csr(dd, RCV_PARTITION_KEY + |
				  ((i - 3) * 2), reg);
13543 | reg = 0; |
13544 | } |
13545 | } |
13546 | |
13547 | /* Always enable HW pkeys check when pkeys table is set */ |
13548 | add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); |
13549 | } |
13550 | |
13551 | /* |
13552 | * These CSRs and memories are uninitialized on reset and must be |
13553 | * written before reading to set the ECC/parity bits. |
13554 | * |
 * NOTE: All user context CSRs that are not mmapped write-only
13556 | * (e.g. the TID flows) must be initialized even if the driver never |
13557 | * reads them. |
13558 | */ |
13559 | static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) |
13560 | { |
13561 | int i, j; |
13562 | |
13563 | /* CceIntMap */ |
13564 | for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) |
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13566 | |
13567 | /* SendCtxtCreditReturnAddr */ |
13568 | for (i = 0; i < chip_send_contexts(dd); i++) |
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13570 | |
13571 | /* PIO Send buffers */ |
13572 | /* SDMA Send buffers */ |
13573 | /* |
13574 | * These are not normally read, and (presently) have no method |
13575 | * to be read, so are not pre-initialized |
13576 | */ |
13577 | |
13578 | /* RcvHdrAddr */ |
13579 | /* RcvHdrTailAddr */ |
13580 | /* RcvTidFlowTable */ |
13581 | for (i = 0; i < chip_rcv_contexts(dd); i++) { |
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13586 | } |
13587 | |
13588 | /* RcvArray */ |
13589 | for (i = 0; i < chip_rcv_array_count(dd); i++) |
		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13591 | |
13592 | /* RcvQPMapTable */ |
13593 | for (i = 0; i < 32; i++) |
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13595 | } |
13596 | |
13597 | /* |
13598 | * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus. |
13599 | */ |
13600 | static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, |
13601 | u64 ctrl_bits) |
13602 | { |
13603 | unsigned long timeout; |
13604 | u64 reg; |
13605 | |
13606 | /* is the condition present? */ |
13607 | reg = read_csr(dd, CCE_STATUS); |
13608 | if ((reg & status_bits) == 0) |
13609 | return; |
13610 | |
13611 | /* clear the condition */ |
	write_csr(dd, CCE_CTRL, ctrl_bits);
13613 | |
13614 | /* wait for the condition to clear */ |
13615 | timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT); |
13616 | while (1) { |
13617 | reg = read_csr(dd, CCE_STATUS); |
13618 | if ((reg & status_bits) == 0) |
13619 | return; |
13620 | if (time_after(jiffies, timeout)) { |
13621 | dd_dev_err(dd, |
13622 | "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n" , |
13623 | status_bits, reg & status_bits); |
13624 | return; |
13625 | } |
13626 | udelay(1); |
13627 | } |
13628 | } |
13629 | |
13630 | /* set CCE CSRs to chip reset defaults */ |
13631 | static void reset_cce_csrs(struct hfi1_devdata *dd) |
13632 | { |
13633 | int i; |
13634 | |
13635 | /* CCE_REVISION read-only */ |
13636 | /* CCE_REVISION2 read-only */ |
13637 | /* CCE_CTRL - bits clear automatically */ |
13638 | /* CCE_STATUS read-only, use CceCtrl to clear */ |
13639 | clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); |
13640 | clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); |
13641 | clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); |
	for (i = 0; i < CCE_NUM_SCRATCH; i++)
		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
	/* CCE_ERR_STATUS read-only */
	write_csr(dd, CCE_ERR_MASK, 0);
	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
	/* CCE_ERR_FORCE leave alone */
	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
	/* CCE_PCIE_CTRL leave alone */
	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
			  CCE_MSIX_TABLE_UPPER_RESETCSR);
	}
	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
		/* CCE_MSIX_PBA read-only */
		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
	}
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		/* CCE_INT_STATUS read-only */
		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
		/* CCE_INT_FORCE leave alone */
		/* CCE_INT_BLOCKED read-only */
	}
	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13673 | } |
13674 | |
13675 | /* set MISC CSRs to chip reset defaults */ |
13676 | static void reset_misc_csrs(struct hfi1_devdata *dd) |
13677 | { |
13678 | int i; |
13679 | |
13680 | for (i = 0; i < 32; i++) { |
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13684 | } |
13685 | /* |
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written in 128-byte chunks
13688 | */ |
13689 | /* init RSA engine to clear lingering errors */ |
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
13693 | /* MISC_STS_8051_DIGEST read-only */ |
13694 | /* MISC_STS_SBM_DIGEST read-only */ |
13695 | /* MISC_STS_PCIE_DIGEST read-only */ |
13696 | /* MISC_STS_FAB_DIGEST read-only */ |
13697 | /* MISC_ERR_STATUS read-only */ |
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13700 | /* MISC_ERR_FORCE leave alone */ |
13701 | } |
13702 | |
13703 | /* set TXE CSRs to chip reset defaults */ |
13704 | static void reset_txe_csrs(struct hfi1_devdata *dd) |
13705 | { |
13706 | int i; |
13707 | |
13708 | /* |
13709 | * TXE Kernel CSRs |
13710 | */ |
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
13713 | /* SEND_CONTEXTS read-only */ |
13714 | /* SEND_DMA_ENGINES read-only */ |
13715 | /* SEND_PIO_MEM_SIZE read-only */ |
13716 | /* SEND_DMA_MEM_SIZE read-only */ |
	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13718 | pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ |
13719 | /* SEND_PIO_ERR_STATUS read-only */ |
	write_csr(dd, SEND_PIO_ERR_MASK, 0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13722 | /* SEND_PIO_ERR_FORCE leave alone */ |
13723 | /* SEND_DMA_ERR_STATUS read-only */ |
	write_csr(dd, SEND_DMA_ERR_MASK, 0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13726 | /* SEND_DMA_ERR_FORCE leave alone */ |
13727 | /* SEND_EGRESS_ERR_STATUS read-only */ |
	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13730 | /* SEND_EGRESS_ERR_FORCE leave alone */ |
	write_csr(dd, SEND_BTH_QP, 0);
	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
	write_csr(dd, SEND_SC2VLT0, 0);
	write_csr(dd, SEND_SC2VLT1, 0);
	write_csr(dd, SEND_SC2VLT2, 0);
	write_csr(dd, SEND_SC2VLT3, 0);
	write_csr(dd, SEND_LEN_CHECK0, 0);
	write_csr(dd, SEND_LEN_CHECK1, 0);
	/* SEND_ERR_STATUS read-only */
	write_csr(dd, SEND_ERR_MASK, 0);
	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13742 | /* SEND_ERR_FORCE read-only */ |
	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
	/* SEND_CM_CREDIT_USED_STATUS read-only */
	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13764 | /* SEND_CM_CREDIT_USED_VL read-only */ |
13765 | /* SEND_CM_CREDIT_USED_VL15 read-only */ |
13766 | /* SEND_EGRESS_CTXT_STATUS read-only */ |
13767 | /* SEND_EGRESS_SEND_DMA_STATUS read-only */ |
	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13769 | /* SEND_EGRESS_ERR_INFO read-only */ |
13770 | /* SEND_EGRESS_ERR_SOURCE read-only */ |
13771 | |
13772 | /* |
13773 | * TXE Per-Context CSRs |
13774 | */ |
13775 | for (i = 0; i < chip_send_contexts(dd); i++) { |
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13788 | } |
13789 | |
13790 | /* |
13791 | * TXE Per-SDMA CSRs |
13792 | */ |
13793 | for (i = 0; i < chip_sdma_engines(dd); i++) { |
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
		/* SEND_DMA_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
		/* SEND_DMA_HEAD read-only */
		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
		/* SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13817 | } |
13818 | } |
13819 | |
13820 | /* |
13821 | * Expect on entry: |
13822 | * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0 |
13823 | */ |
13824 | static void init_rbufs(struct hfi1_devdata *dd) |
13825 | { |
13826 | u64 reg; |
13827 | int count; |
13828 | |
13829 | /* |
13830 | * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are |
13831 | * clear. |
13832 | */ |
13833 | count = 0; |
13834 | while (1) { |
13835 | reg = read_csr(dd, RCV_STATUS); |
13836 | if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK |
13837 | | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0) |
13838 | break; |
13839 | /* |
13840 | * Give up after 1ms - maximum wait time. |
13841 | * |
13842 | * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at |
13843 | * 250MB/s bandwidth. Lower rate to 66% for overhead to get: |
13844 | * 136 KB / (66% * 250MB/s) = 844us |
13845 | */ |
13846 | if (count++ > 500) { |
13847 | dd_dev_err(dd, |
13848 | "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n" , |
13849 | __func__, reg); |
13850 | break; |
13851 | } |
13852 | udelay(2); /* do not busy-wait the CSR */ |
13853 | } |
13854 | |
13855 | /* start the init - expect RcvCtrl to be 0 */ |
13856 | write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); |
13857 | |
13858 | /* |
	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
13863 | */ |
13864 | read_csr(dd, RCV_CTRL); |
13865 | |
13866 | /* wait for the init to finish */ |
13867 | count = 0; |
13868 | while (1) { |
13869 | /* delay is required first time through - see above */ |
13870 | udelay(2); /* do not busy-wait the CSR */ |
13871 | reg = read_csr(dd, RCV_STATUS); |
13872 | if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK)) |
13873 | break; |
13874 | |
13875 | /* give up after 100us - slowest possible at 33MHz is 73us */ |
13876 | if (count++ > 50) { |
13877 | dd_dev_err(dd, |
13878 | "%s: RcvStatus.RxRbufInit not set, continuing\n" , |
13879 | __func__); |
13880 | break; |
13881 | } |
13882 | } |
13883 | } |
13884 | |
13885 | /* set RXE CSRs to chip reset defaults */ |
13886 | static void reset_rxe_csrs(struct hfi1_devdata *dd) |
13887 | { |
13888 | int i, j; |
13889 | |
13890 | /* |
13891 | * RXE Kernel CSRs |
13892 | */ |
	write_csr(dd, RCV_CTRL, 0);
13894 | init_rbufs(dd); |
13895 | /* RCV_STATUS read-only */ |
13896 | /* RCV_CONTEXTS read-only */ |
13897 | /* RCV_ARRAY_CNT read-only */ |
13898 | /* RCV_BUF_SIZE read-only */ |
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13922 | |
13923 | /* |
13924 | * RXE Kernel and User Per-Context CSRs |
13925 | */ |
13926 | for (i = 0; i < chip_rcv_contexts(dd); i++) { |
13927 | /* kernel */ |
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
13950 | } |
13951 | } |
13952 | } |
13953 | |
13954 | /* |
13955 | * Set sc2vl tables. |
13956 | * |
13957 | * They power on to zeros, so to avoid send context errors |
13958 | * they need to be set: |
13959 | * |
13960 | * SC 0-7 -> VL 0-7 (respectively) |
13961 | * SC 15 -> VL 15 |
13962 | * otherwise |
13963 | * -> VL 0 |
13964 | */ |
13965 | static void init_sc2vl_tables(struct hfi1_devdata *dd) |
13966 | { |
13967 | int i; |
13968 | /* init per architecture spec, constrained by hardware capability */ |
13969 | |
13970 | /* HFI maps sent packets */ |
13971 | write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( |
13972 | 0, |
13973 | 0, 0, 1, 1, |
13974 | 2, 2, 3, 3, |
13975 | 4, 4, 5, 5, |
13976 | 6, 6, 7, 7)); |
13977 | write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( |
13978 | 1, |
13979 | 8, 0, 9, 0, |
13980 | 10, 0, 11, 0, |
13981 | 12, 0, 13, 0, |
13982 | 14, 0, 15, 15)); |
13983 | write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( |
13984 | 2, |
13985 | 16, 0, 17, 0, |
13986 | 18, 0, 19, 0, |
13987 | 20, 0, 21, 0, |
13988 | 22, 0, 23, 0)); |
13989 | write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( |
13990 | 3, |
13991 | 24, 0, 25, 0, |
13992 | 26, 0, 27, 0, |
13993 | 28, 0, 29, 0, |
13994 | 30, 0, 31, 0)); |
13995 | |
13996 | /* DC maps received packets */ |
13997 | write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( |
13998 | 15_0, |
13999 | 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, |
14000 | 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); |
14001 | write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( |
14002 | 31_16, |
14003 | 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, |
14004 | 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); |
14005 | |
14006 | /* initialize the cached sc2vl values consistently with h/w */ |
14007 | for (i = 0; i < 32; i++) { |
14008 | if (i < 8 || i == 15) |
14009 | *((u8 *)(dd->sc2vl) + i) = (u8)i; |
14010 | else |
14011 | *((u8 *)(dd->sc2vl) + i) = 0; |
14012 | } |
14013 | } |
14014 | |
14015 | /* |
14016 | * Read chip sizes and then reset parts to sane, disabled, values. We cannot |
14017 | * depend on the chip going through a power-on reset - a driver may be loaded |
14018 | * and unloaded many times. |
14019 | * |
14020 | * Do not write any CSR values to the chip in this routine - there may be |
14021 | * a reset following the (possible) FLR in this routine. |
14022 | * |
14023 | */ |
14024 | static int init_chip(struct hfi1_devdata *dd) |
14025 | { |
14026 | int i; |
14027 | int ret = 0; |
14028 | |
14029 | /* |
14030 | * Put the HFI CSRs in a known state. |
14031 | * Combine this with a DC reset. |
14032 | * |
14033 | * Stop the device from doing anything while we do a |
14034 | * reset. We know there are no other active users of |
14035 | * the device since we are now in charge. Turn off |
	 * all outbound and inbound traffic and make sure
14037 | * the device does not generate any interrupts. |
14038 | */ |
14039 | |
	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	for (i = 0; i < chip_rcv_contexts(dd); i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14053 | |
14054 | /* |
14055 | * DC Reset: do a full DC reset before the register clear. |
14056 | * A recommended length of time to hold is one CSR read, |
14057 | * so reread the CceDcCtrl. Then, hold the DC in reset |
14058 | * across the clear. |
14059 | */ |
14060 | write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); |
14061 | (void)read_csr(dd, CCE_DC_CTRL); |
14062 | |
14063 | if (use_flr) { |
14064 | /* |
14065 | * A FLR will reset the SPC core and part of the PCIe. |
14066 | * The parts that need to be restored have already been |
14067 | * saved. |
14068 | */ |
14069 | dd_dev_info(dd, "Resetting CSRs with FLR\n" ); |
14070 | |
14071 | /* do the FLR, the DC reset will remain */ |
14072 | pcie_flr(dev: dd->pcidev); |
14073 | |
14074 | /* restore command and BARs */ |
14075 | ret = restore_pci_variables(dd); |
14076 | if (ret) { |
14077 | dd_dev_err(dd, "%s: Could not restore PCI variables\n" , |
14078 | __func__); |
14079 | return ret; |
14080 | } |
14081 | |
14082 | if (is_ax(dd)) { |
14083 | dd_dev_info(dd, "Resetting CSRs with FLR\n" ); |
14084 | pcie_flr(dev: dd->pcidev); |
14085 | ret = restore_pci_variables(dd); |
14086 | if (ret) { |
14087 | dd_dev_err(dd, "%s: Could not restore PCI variables\n" , |
14088 | __func__); |
14089 | return ret; |
14090 | } |
14091 | } |
14092 | } else { |
14093 | dd_dev_info(dd, "Resetting CSRs with writes\n" ); |
14094 | reset_cce_csrs(dd); |
14095 | reset_txe_csrs(dd); |
14096 | reset_rxe_csrs(dd); |
14097 | reset_misc_csrs(dd); |
14098 | } |
14099 | /* clear the DC reset */ |
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	setextled(dd, 0);
14104 | |
14105 | /* |
14106 | * Clear the QSFP reset. |
14107 | * An FLR enforces a 0 on all out pins. The driver does not touch |
	 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low, holding
	 * anything plugged in constantly in reset if it pays attention
	 * to RESET_N.
14111 | * Prime examples of this are optical cables. Set all pins high. |
14112 | * I2CCLK and I2CDAT will change per direction, and INT_N and |
14113 | * MODPRS_N are input only and their value is ignored. |
14114 | */ |
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14117 | init_chip_resources(dd); |
14118 | return ret; |
14119 | } |
14120 | |
14121 | static void init_early_variables(struct hfi1_devdata *dd) |
14122 | { |
14123 | int i; |
14124 | |
14125 | /* assign link credit variables */ |
14126 | dd->vau = CM_VAU; |
14127 | dd->link_credits = CM_GLOBAL_CREDITS; |
14128 | if (is_ax(dd)) |
14129 | dd->link_credits--; |
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14133 | if (dd->vl15_init > dd->link_credits) |
14134 | dd->vl15_init = dd->link_credits; |
14135 | |
14136 | write_uninitialized_csrs_and_memories(dd); |
14137 | |
14138 | if (HFI1_CAP_IS_KSET(PKEY_CHECK)) |
14139 | for (i = 0; i < dd->num_pports; i++) { |
14140 | struct hfi1_pportdata *ppd = &dd->pport[i]; |
14141 | |
14142 | set_partition_keys(ppd); |
14143 | } |
14144 | init_sc2vl_tables(dd); |
14145 | } |
14146 | |
14147 | static void init_kdeth_qp(struct hfi1_devdata *dd) |
14148 | { |
	write_csr(dd, SEND_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
		  SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
14156 | } |
14157 | |
14158 | /** |
14159 | * hfi1_get_qp_map - get qp map |
14160 | * @dd: device data |
14161 | * @idx: index to read |
14162 | */ |
14163 | u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) |
14164 | { |
14165 | u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); |
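	/* the table packs eight one-byte context entries per 64-bit CSR */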
14166 | |
14167 | reg >>= (idx % 8) * 8; |
14168 | return reg; |
14169 | } |
14170 | |
14171 | /** |
14172 | * init_qpmap_table - init qp map |
14173 | * @dd: device data |
14174 | * @first_ctxt: first context |
 * @last_ctxt: last context
14176 | * |
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass. Normal
 * verbs traffic is assumed to be on a range
 * of receive contexts.
14187 | */ |
14188 | static void init_qpmap_table(struct hfi1_devdata *dd, |
14189 | u32 first_ctxt, |
14190 | u32 last_ctxt) |
14191 | { |
14192 | u64 reg = 0; |
14193 | u64 regno = RCV_QP_MAP_TABLE; |
14194 | int i; |
14195 | u64 ctxt = first_ctxt; |
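
	/*
	 * Each 64-bit map register packs eight one-byte context numbers;
	 * for example (hypothetical values), first_ctxt = 1 and
	 * last_ctxt = 3 yield the repeating byte pattern 1, 2, 3, 1, 2,
	 * 3, ... across all 256 entries.
	 */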
14196 | |
14197 | for (i = 0; i < 256; i++) { |
14198 | reg |= ctxt << (8 * (i % 8)); |
14199 | ctxt++; |
14200 | if (ctxt > last_ctxt) |
14201 | ctxt = first_ctxt; |
14202 | if (i % 8 == 7) { |
			write_csr(dd, regno, reg);
14204 | reg = 0; |
14205 | regno += 8; |
14206 | } |
14207 | } |
14208 | |
14209 | add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK |
14210 | | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK); |
14211 | } |
14212 | |
14213 | struct rsm_map_table { |
14214 | u64 map[NUM_MAP_REGS]; |
14215 | unsigned int used; |
14216 | }; |
14217 | |
14218 | struct rsm_rule_data { |
14219 | u8 offset; |
14220 | u8 pkt_type; |
14221 | u32 field1_off; |
14222 | u32 field2_off; |
14223 | u32 index1_off; |
14224 | u32 index1_width; |
14225 | u32 index2_off; |
14226 | u32 index2_width; |
14227 | u32 mask1; |
14228 | u32 value1; |
14229 | u32 mask2; |
14230 | u32 value2; |
14231 | }; |
14232 | |
14233 | /* |
14234 | * Return an initialized RMT map table for users to fill in. OK if it |
14235 | * returns NULL, indicating no table. |
14236 | */ |
14237 | static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) |
14238 | { |
14239 | struct rsm_map_table *rmt; |
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is the default on A0 hardware */
14241 | |
	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14243 | if (rmt) { |
14244 | memset(rmt->map, rxcontext, sizeof(rmt->map)); |
14245 | rmt->used = 0; |
14246 | } |
14247 | |
14248 | return rmt; |
14249 | } |
14250 | |
14251 | /* |
14252 | * Write the final RMT map table to the chip and free the table. OK if |
14253 | * table is NULL. |
14254 | */ |
14255 | static void complete_rsm_map_table(struct hfi1_devdata *dd, |
14256 | struct rsm_map_table *rmt) |
14257 | { |
14258 | int i; |
14259 | |
14260 | if (rmt) { |
14261 | /* write table to chip */ |
14262 | for (i = 0; i < NUM_MAP_REGS; i++) |
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14264 | |
14265 | /* enable RSM */ |
14266 | add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); |
14267 | } |
14268 | } |
14269 | |
/* Return true if the given receive side mapping rule is in use */
14271 | static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) |
14272 | { |
14273 | return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; |
14274 | } |
14275 | |
14276 | /* |
14277 | * Add a receive side mapping rule. |
14278 | */ |
14279 | static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, |
14280 | struct rsm_rule_data *rrd) |
14281 | { |
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index | /* enable bit */
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14298 | } |
14299 | |
14300 | /* |
14301 | * Clear a receive side mapping rule. |
14302 | */ |
14303 | static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) |
14304 | { |
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14308 | } |
14309 | |
14310 | /* return the number of RSM map table entries that will be used for QOS */ |
14311 | static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, |
14312 | unsigned int *np) |
14313 | { |
14314 | int i; |
14315 | unsigned int m, n; |
14316 | uint max_by_vl = 0; |
14317 | |
14318 | /* is QOS active at all? */ |
14319 | if (n_krcv_queues < MIN_KERNEL_KCTXTS || |
14320 | num_vls == 1 || |
14321 | krcvqsset <= 1) |
14322 | goto no_qos; |
14323 | |
14324 | /* determine bits for qpn */ |
14325 | for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++) |
14326 | if (krcvqs[i] > max_by_vl) |
14327 | max_by_vl = krcvqs[i]; |
14328 | if (max_by_vl > 32) |
14329 | goto no_qos; |
14330 | m = ilog2(__roundup_pow_of_two(max_by_vl)); |
14331 | |
14332 | /* determine bits for vl */ |
14333 | n = ilog2(__roundup_pow_of_two(num_vls)); |
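
	/*
	 * Worked example (hypothetical values): num_vls = 4 and
	 * krcvqs = {3, 3, 3, 3} give max_by_vl = 3, so m = 2 qpn bits
	 * and n = 2 vl bits, consuming 1 << (2 + 2) = 16 RMT entries.
	 */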
14334 | |
14335 | /* reject if too much is used */ |
14336 | if ((m + n) > 7) |
14337 | goto no_qos; |
14338 | |
14339 | if (mp) |
14340 | *mp = m; |
14341 | if (np) |
14342 | *np = n; |
14343 | |
14344 | return 1 << (m + n); |
14345 | |
14346 | no_qos: |
14347 | if (mp) |
14348 | *mp = 0; |
14349 | if (np) |
14350 | *np = 0; |
14351 | return 0; |
14352 | } |
14353 | |
14354 | /** |
14355 | * init_qos - init RX qos |
14356 | * @dd: device data |
14357 | * @rmt: RSM map table |
14358 | * |
14359 | * This routine initializes Rule 0 and the RSM map table to implement |
14360 | * quality of service (qos). |
14361 | * |
14362 | * If all of the limit tests succeed, qos is applied based on the array |
14363 | * interpretation of krcvqs where entry 0 is VL0. |
14364 | * |
14365 | * The number of vl bits (n) and the number of qpn bits (m) are computed to |
14366 | * feed both the RSM map table and the single rule. |
14367 | */ |
14368 | static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) |
14369 | { |
14370 | struct rsm_rule_data rrd; |
14371 | unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; |
14372 | unsigned int rmt_entries; |
14373 | u64 reg; |
14374 | |
14375 | if (!rmt) |
14376 | goto bail; |
	rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
14378 | if (rmt_entries == 0) |
14379 | goto bail; |
14380 | qpns_per_vl = 1 << m; |
14381 | |
14382 | /* enough room in the map table? */ |
14383 | rmt_entries = 1 << (m + n); |
14384 | if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) |
14385 | goto bail; |
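
	/*
	 * Layout sketch for the entries added below: the generated index
	 * rmt->used + ((qpn << n) ^ i) keeps the n low bits for the VL
	 * number i and the m high bits for the qpn, so each VL owns
	 * qpns_per_vl entries that round-robin across its krcvqs[i]
	 * kernel contexts.
	 */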
14386 | |
14387 | /* add qos entries to the RSM map table */ |
14388 | for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) { |
14389 | unsigned tctxt; |
14390 | |
14391 | for (qpn = 0, tctxt = ctxt; |
14392 | krcvqs[i] && qpn < qpns_per_vl; qpn++) { |
14393 | unsigned idx, regoff, regidx; |
14394 | |
14395 | /* generate the index the hardware will produce */ |
14396 | idx = rmt->used + ((qpn << n) ^ i); |
14397 | regoff = (idx % 8) * 8; |
14398 | regidx = idx / 8; |
14399 | /* replace default with context number */ |
14400 | reg = rmt->map[regidx]; |
14401 | reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK |
14402 | << regoff); |
14403 | reg |= (u64)(tctxt++) << regoff; |
14404 | rmt->map[regidx] = reg; |
14405 | if (tctxt == ctxt + krcvqs[i]) |
14406 | tctxt = ctxt; |
14407 | } |
14408 | ctxt += krcvqs[i]; |
14409 | } |
14410 | |
14411 | rrd.offset = rmt->used; |
14412 | rrd.pkt_type = 2; |
14413 | rrd.field1_off = LRH_BTH_MATCH_OFFSET; |
14414 | rrd.field2_off = LRH_SC_MATCH_OFFSET; |
14415 | rrd.index1_off = LRH_SC_SELECT_OFFSET; |
14416 | rrd.index1_width = n; |
14417 | rrd.index2_off = QPN_SELECT_OFFSET; |
14418 | rrd.index2_width = m + n; |
14419 | rrd.mask1 = LRH_BTH_MASK; |
14420 | rrd.value1 = LRH_BTH_VALUE; |
14421 | rrd.mask2 = LRH_SC_MASK; |
14422 | rrd.value2 = LRH_SC_VALUE; |
14423 | |
14424 | /* add rule 0 */ |
14425 | add_rsm_rule(dd, RSM_INS_VERBS, rrd: &rrd); |
14426 | |
14427 | /* mark RSM map entries as used */ |
14428 | rmt->used += rmt_entries; |
14429 | /* map everything else to the mcast/err/vl15 context */ |
14430 | init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT); |
14431 | dd->qos_shift = n + 1; |
14432 | return; |
14433 | bail: |
14434 | dd->qos_shift = 1; |
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14436 | } |
14437 | |
14438 | static void init_fecn_handling(struct hfi1_devdata *dd, |
14439 | struct rsm_map_table *rmt) |
14440 | { |
14441 | struct rsm_rule_data rrd; |
14442 | u64 reg; |
14443 | int i, idx, regoff, regidx, start; |
14444 | u8 offset; |
14445 | u32 total_cnt; |
14446 | |
14447 | if (HFI1_CAP_IS_KSET(TID_RDMA)) |
14448 | /* Exclude context 0 */ |
14449 | start = 1; |
14450 | else |
14451 | start = dd->first_dyn_alloc_ctxt; |
14452 | |
14453 | total_cnt = dd->num_rcv_contexts - start; |
14454 | |
14455 | /* there needs to be enough room in the map table */ |
14456 | if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { |
14457 | dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n" ); |
14458 | return; |
14459 | } |
14460 | |
14461 | /* |
14462 | * RSM will extract the destination context as an index into the |
14463 | * map table. The destination contexts are a sequential block |
14464 | * in the range start...num_rcv_contexts-1 (inclusive). |
14465 | * Map entries are accessed as offset + extracted value. Adjust |
14466 | * the added offset so this sequence can be placed anywhere in |
14467 | * the table - as long as the entries themselves do not wrap. |
14468 | * There are only enough bits in offset for the table size, so |
14469 | * start with that to allow for a "negative" offset. |
14470 | */ |
14471 | offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); |
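
	/*
	 * Example with hypothetical numbers: rmt->used = 20 and start = 3
	 * give offset = (u8)(256 + 20 - 3) = 17; the entry for destination
	 * context 3 is then found at (17 + 3) % 256 = 20, the first slot
	 * this function claims below.
	 */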
14472 | |
14473 | for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; |
14474 | i++, idx++) { |
14475 | /* replace with identity mapping */ |
14476 | regoff = (idx % 8) * 8; |
14477 | regidx = idx / 8; |
14478 | reg = rmt->map[regidx]; |
14479 | reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff); |
14480 | reg |= (u64)i << regoff; |
14481 | rmt->map[regidx] = reg; |
14482 | } |
14483 | |
14484 | /* |
14485 | * For RSM intercept of Expected FECN packets: |
14486 | * o packet type 0 - expected |
14487 | * o match on F (bit 95), using select/match 1, and |
14488 | * o match on SH (bit 133), using select/match 2. |
14489 | * |
14490 | * Use index 1 to extract the 8-bit receive context from DestQP |
14491 | * (start at bit 64). Use that as the RSM map table index. |
14492 | */ |
14493 | rrd.offset = offset; |
14494 | rrd.pkt_type = 0; |
14495 | rrd.field1_off = 95; |
14496 | rrd.field2_off = 133; |
14497 | rrd.index1_off = 64; |
14498 | rrd.index1_width = 8; |
14499 | rrd.index2_off = 0; |
14500 | rrd.index2_width = 0; |
14501 | rrd.mask1 = 1; |
14502 | rrd.value1 = 1; |
14503 | rrd.mask2 = 1; |
14504 | rrd.value2 = 1; |
14505 | |
14506 | /* add rule 1 */ |
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14508 | |
14509 | rmt->used += total_cnt; |
14510 | } |
14511 | |
14512 | static inline bool hfi1_is_rmt_full(int start, int spare) |
14513 | { |
14514 | return (start + spare) > NUM_MAP_ENTRIES; |
14515 | } |
14516 | |
14517 | static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) |
14518 | { |
14519 | u8 i, j; |
14520 | u8 ctx_id = 0; |
14521 | u64 reg; |
14522 | u32 regoff; |
14523 | int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
14524 | int ctxt_count = hfi1_netdev_ctxt_count(dd); |
14525 | |
14526 | /* We already have contexts mapped in RMT */ |
14527 | if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { |
14528 | dd_dev_info(dd, "Contexts are already mapped in RMT\n" ); |
14529 | return true; |
14530 | } |
14531 | |
	if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
		dd_dev_err(dd, "Not enough RMT entries used = %d\n",
14534 | rmt_start); |
14535 | return false; |
14536 | } |
14537 | |
14538 | dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n" , |
14539 | rmt_start, |
14540 | rmt_start + NUM_NETDEV_MAP_ENTRIES); |
14541 | |
14542 | /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ |
14543 | regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8; |
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
		/* Update map register with netdev context */
		j = (rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
		/* Wrap up netdev ctx index */
		ctx_id %= ctxt_count;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"RMT[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
14562 | } |
14563 | } |
14564 | |
14565 | return true; |
14566 | } |
14567 | |
14568 | static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, |
14569 | int rule, struct rsm_rule_data *rrd) |
14570 | { |
14571 | if (!hfi1_netdev_update_rmt(dd)) { |
14572 | dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n" , rule); |
14573 | return; |
14574 | } |
14575 | |
14576 | add_rsm_rule(dd, rule_index: rule, rrd); |
14577 | add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); |
14578 | } |
14579 | |
14580 | void hfi1_init_aip_rsm(struct hfi1_devdata *dd) |
14581 | { |
14582 | /* |
14583 | * go through with the initialisation only if this rule actually doesn't |
14584 | * exist yet |
14585 | */ |
	if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
14587 | int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
14588 | struct rsm_rule_data rrd = { |
14589 | .offset = rmt_start, |
14590 | .pkt_type = IB_PACKET_TYPE, |
14591 | .field1_off = LRH_BTH_MATCH_OFFSET, |
14592 | .mask1 = LRH_BTH_MASK, |
14593 | .value1 = LRH_BTH_VALUE, |
14594 | .field2_off = BTH_DESTQP_MATCH_OFFSET, |
14595 | .mask2 = BTH_DESTQP_MASK, |
14596 | .value2 = BTH_DESTQP_VALUE, |
14597 | .index1_off = DETH_AIP_SQPN_SELECT_OFFSET + |
14598 | ilog2(NUM_NETDEV_MAP_ENTRIES), |
14599 | .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), |
14600 | .index2_off = DETH_AIP_SQPN_SELECT_OFFSET, |
14601 | .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) |
14602 | }; |
14603 | |
		hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
14605 | } |
14606 | } |
14607 | |
14608 | /* Initialize RSM for VNIC */ |
14609 | void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) |
14610 | { |
14611 | int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
14612 | struct rsm_rule_data rrd = { |
14613 | /* Add rule for vnic */ |
14614 | .offset = rmt_start, |
14615 | .pkt_type = 4, |
14616 | /* Match 16B packets */ |
14617 | .field1_off = L2_TYPE_MATCH_OFFSET, |
14618 | .mask1 = L2_TYPE_MASK, |
14619 | .value1 = L2_16B_VALUE, |
14620 | /* Match ETH L4 packets */ |
14621 | .field2_off = L4_TYPE_MATCH_OFFSET, |
14622 | .mask2 = L4_16B_TYPE_MASK, |
14623 | .value2 = L4_16B_ETH_VALUE, |
14624 | /* Calc context from veswid and entropy */ |
14625 | .index1_off = L4_16B_HDR_VESWID_OFFSET, |
14626 | .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), |
14627 | .index2_off = L2_16B_ENTROPY_OFFSET, |
14628 | .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) |
14629 | }; |
14630 | |
	hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14632 | } |
14633 | |
14634 | void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) |
14635 | { |
14636 | clear_rsm_rule(dd, RSM_INS_VNIC); |
14637 | } |
14638 | |
14639 | void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) |
14640 | { |
14641 | /* only actually clear the rule if it's the last user asking to do so */ |
	if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
14643 | clear_rsm_rule(dd, RSM_INS_AIP); |
14644 | } |
14645 | |
14646 | static int init_rxe(struct hfi1_devdata *dd) |
14647 | { |
14648 | struct rsm_map_table *rmt; |
14649 | u64 val; |
14650 | |
14651 | /* enable all receive errors */ |
	write_csr(dd, RCV_ERR_MASK, ~0ull);
14653 | |
14654 | rmt = alloc_rsm_map_table(dd); |
14655 | if (!rmt) |
14656 | return -ENOMEM; |
14657 | |
14658 | /* set up QOS, including the QPN map table */ |
14659 | init_qos(dd, rmt); |
14660 | init_fecn_handling(dd, rmt); |
14661 | complete_rsm_map_table(dd, rmt); |
14662 | /* record number of used rsm map entries for netdev */ |
	hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
	kfree(rmt);
14665 | |
14666 | /* |
14667 | * make sure RcvCtrl.RcvWcb <= PCIe Device Control |
14668 | * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config |
14669 | * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one |
14670 | * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and |
14671 | * Max_PayLoad_Size set to its minimum of 128. |
14672 | * |
14673 | * Presently, RcvCtrl.RcvWcb is not modified from its default of 0 |
14674 | * (64 bytes). Max_Payload_Size is possibly modified upward in |
14675 | * tune_pcie_caps() which is called after this routine. |
14676 | */ |
14677 | |
14678 | /* Have 16 bytes (4DW) of bypass header available in header queue */ |
14679 | val = read_csr(dd, RCV_BYPASS); |
14680 | val &= ~RCV_BYPASS_HDR_SIZE_SMASK; |
14681 | val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << |
14682 | RCV_BYPASS_HDR_SIZE_SHIFT); |
	write_csr(dd, RCV_BYPASS, val);
14684 | return 0; |
14685 | } |
14686 | |
14687 | static void init_other(struct hfi1_devdata *dd) |
14688 | { |
14689 | /* enable all CCE errors */ |
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14696 | } |
14697 | |
14698 | /* |
 * Fill out the given AU table using the given CU. A CU is defined in terms
 * of AUs. The table is an encoding: given the index, how many AUs does that
 * represent?
14702 | * |
14703 | * NOTE: Assumes that the register layout is the same for the |
14704 | * local and remote tables. |
14705 | */ |
14706 | static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, |
14707 | u32 csr0to3, u32 csr4to7) |
14708 | { |
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
14718 | SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT | |
14719 | 16ull * cu << |
14720 | SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT | |
14721 | 32ull * cu << |
14722 | SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | |
14723 | 64ull * cu << |
14724 | SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); |
14725 | } |
14726 | |
14727 | static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) |
14728 | { |
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14730 | SEND_CM_LOCAL_AU_TABLE4_TO7); |
14731 | } |
14732 | |
14733 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) |
14734 | { |
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14736 | SEND_CM_REMOTE_AU_TABLE4_TO7); |
14737 | } |
14738 | |
14739 | static void init_txe(struct hfi1_devdata *dd) |
14740 | { |
14741 | int i; |
14742 | |
14743 | /* enable all PIO, SDMA, general, and Egress errors */ |
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14748 | |
14749 | /* enable all per-context and per-SDMA engine errors */ |
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14754 | |
14755 | /* set the local CU to AU mapping */ |
	assign_local_cm_au_table(dd, dd->vcu);
14757 | |
14758 | /* |
14759 | * Set reasonable default for Credit Return Timer |
14760 | * Don't set on Simulator - causes it to choke. |
14761 | */ |
14762 | if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) |
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14764 | } |
14765 | |
14766 | int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, |
14767 | u16 jkey) |
14768 | { |
14769 | u8 hw_ctxt; |
14770 | u64 reg; |
14771 | |
14772 | if (!rcd || !rcd->sc) |
14773 | return -EINVAL; |
14774 | |
14775 | hw_ctxt = rcd->sc->hw_context; |
14776 | reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */ |
14777 | ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) << |
14778 | SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT); |
14779 | /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */ |
14780 | if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) |
14781 | reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK; |
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14783 | /* |
14784 | * Enable send-side J_KEY integrity check, unless this is A0 h/w |
14785 | */ |
14786 | if (!is_ax(dd)) { |
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14790 | } |
14791 | |
14792 | /* Enable J_KEY check on receive context. */ |
14793 | reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK | |
14794 | ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) << |
14795 | RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT); |
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14797 | |
14798 | return 0; |
14799 | } |
14800 | |
14801 | int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) |
14802 | { |
14803 | u8 hw_ctxt; |
14804 | u64 reg; |
14805 | |
14806 | if (!rcd || !rcd->sc) |
14807 | return -EINVAL; |
14808 | |
14809 | hw_ctxt = rcd->sc->hw_context; |
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14811 | /* |
14812 | * Disable send-side J_KEY integrity check, unless this is A0 h/w. |
14813 | * This check would not have been enabled for A0 h/w, see |
14814 | * set_ctxt_jkey(). |
14815 | */ |
14816 | if (!is_ax(dd)) { |
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14820 | } |
14821 | /* Turn off the J_KEY on the receive side */ |
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14823 | |
14824 | return 0; |
14825 | } |
14826 | |
14827 | int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, |
14828 | u16 pkey) |
14829 | { |
14830 | u8 hw_ctxt; |
14831 | u64 reg; |
14832 | |
14833 | if (!rcd || !rcd->sc) |
14834 | return -EINVAL; |
14835 | |
14836 | hw_ctxt = rcd->sc->hw_context; |
14837 | reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) << |
14838 | SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT; |
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14844 | |
14845 | return 0; |
14846 | } |
14847 | |
14848 | int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) |
14849 | { |
14850 | u8 hw_ctxt; |
14851 | u64 reg; |
14852 | |
14853 | if (!ctxt || !ctxt->sc) |
14854 | return -EINVAL; |
14855 | |
14856 | hw_ctxt = ctxt->sc->hw_context; |
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14861 | |
14862 | return 0; |
14863 | } |
14864 | |
14865 | /* |
 * Start cleaning up the chip. Our clean up happens in multiple
 * stages and this is just the first.
14868 | */ |
14869 | void hfi1_start_cleanup(struct hfi1_devdata *dd) |
14870 | { |
14871 | aspm_exit(dd); |
14872 | free_cntrs(dd); |
14873 | free_rcverr(dd); |
14874 | finish_chip_resources(dd); |
14875 | } |
14876 | |
14877 | #define HFI_BASE_GUID(dev) \ |
14878 | ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT)) |
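/*
 * HFI_BASE_GUID masks off the per-HFI index bit so that the two HFIs
 * on one ASIC share a single base GUID and can find each other as
 * peers (see init_asic_data() below).
 */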
14879 | |
14880 | /* |
14881 | * Information can be shared between the two HFIs on the same ASIC |
14882 | * in the same OS. This function finds the peer device and sets |
14883 | * up a shared structure. |
14884 | */ |
14885 | static int init_asic_data(struct hfi1_devdata *dd) |
14886 | { |
14887 | unsigned long index; |
14888 | struct hfi1_devdata *peer; |
14889 | struct hfi1_asic_data *asic_data; |
14890 | int ret = 0; |
14891 | |
14892 | /* pre-allocate the asic structure in case we are the first device */ |
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14894 | if (!asic_data) |
14895 | return -ENOMEM; |
14896 | |
14897 | xa_lock_irq(&hfi1_dev_table); |
14898 | /* Find our peer device */ |
14899 | xa_for_each(&hfi1_dev_table, index, peer) { |
14900 | if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && |
14901 | dd->unit != peer->unit) |
14902 | break; |
14903 | } |
14904 | |
14905 | if (peer) { |
14906 | /* use already allocated structure */ |
14907 | dd->asic_data = peer->asic_data; |
		kfree(asic_data);
14909 | } else { |
14910 | dd->asic_data = asic_data; |
14911 | mutex_init(&dd->asic_data->asic_resource_mutex); |
14912 | } |
14913 | dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ |
14914 | xa_unlock_irq(&hfi1_dev_table); |
14915 | |
14916 | /* first one through - set up i2c devices */ |
14917 | if (!peer) |
		ret = set_up_i2c(dd, dd->asic_data);
14919 | |
14920 | return ret; |
14921 | } |
14922 | |
14923 | /* |
14924 | * Set dd->boardname. Use a generic name if a name is not returned from |
14925 | * EFI variable space. |
14926 | * |
14927 | * Return 0 on success, -ENOMEM if space could not be allocated. |
14928 | */ |
14929 | static int obtain_boardname(struct hfi1_devdata *dd) |
14930 | { |
14931 | /* generic board description */ |
14932 | const char generic[] = |
14933 | "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series" ; |
14934 | unsigned long size; |
14935 | int ret; |
14936 | |
	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
14939 | if (ret) { |
14940 | dd_dev_info(dd, "Board description not found\n" ); |
14941 | /* use generic description */ |
14942 | dd->boardname = kstrdup(s: generic, GFP_KERNEL); |
14943 | if (!dd->boardname) |
14944 | return -ENOMEM; |
14945 | } |
14946 | return 0; |
14947 | } |
14948 | |
14949 | /* |
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help the user identify any mismapping by the VMM when
 * the driver is running in a VM. This function should only be called before
 * the interrupt is set up properly.
14954 | * |
14955 | * Return 0 on success, -EINVAL on failure. |
14956 | */ |
14957 | static int check_int_registers(struct hfi1_devdata *dd) |
14958 | { |
14959 | u64 reg; |
14960 | u64 all_bits = ~(u64)0; |
14961 | u64 mask; |
14962 | |
	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt status bits */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt status bits */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14990 | return -EINVAL; |
14991 | } |
14992 | |
14993 | /** |
14994 | * hfi1_init_dd() - Initialize most of the dd structure. |
14995 | * @dd: the dd device |
14996 | * |
14997 | * This is global, and is called directly at init to set up the |
14998 | * chip-specific function pointers for later use. |
14999 | */ |
15000 | int hfi1_init_dd(struct hfi1_devdata *dd) |
15001 | { |
15002 | struct pci_dev *pdev = dd->pcidev; |
15003 | struct hfi1_pportdata *ppd; |
15004 | u64 reg; |
15005 | int i, ret; |
15006 | static const char * const inames[] = { /* implementation names */ |
15007 | "RTL silicon" , |
15008 | "RTL VCS simulation" , |
15009 | "RTL FPGA emulation" , |
15010 | "Functional simulator" |
15011 | }; |
15012 | struct pci_dev *parent = pdev->bus->self; |
15013 | u32 sdma_engines = chip_sdma_engines(dd); |
15014 | |
15015 | ppd = dd->pport; |
15016 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
15017 | int vl; |
15018 | /* init common fields */ |
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15020 | /* DC supports 4 link widths */ |
15021 | ppd->link_width_supported = |
15022 | OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X | |
15023 | OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X; |
15024 | ppd->link_width_downgrade_supported = |
15025 | ppd->link_width_supported; |
15026 | /* start out enabling only 4X */ |
15027 | ppd->link_width_enabled = OPA_LINK_WIDTH_4X; |
15028 | ppd->link_width_downgrade_enabled = |
15029 | ppd->link_width_downgrade_supported; |
15030 | /* link width active is 0 when link is down */ |
15031 | /* link width downgrade active is 0 when link is down */ |
15032 | |
15033 | if (num_vls < HFI1_MIN_VLS_SUPPORTED || |
15034 | num_vls > HFI1_MAX_VLS_SUPPORTED) { |
15035 | dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n" , |
15036 | num_vls, HFI1_MAX_VLS_SUPPORTED); |
15037 | num_vls = HFI1_MAX_VLS_SUPPORTED; |
15038 | } |
15039 | ppd->vls_supported = num_vls; |
15040 | ppd->vls_operational = ppd->vls_supported; |
15041 | /* Set the default MTU. */ |
15042 | for (vl = 0; vl < num_vls; vl++) |
15043 | dd->vld[vl].mtu = hfi1_max_mtu; |
15044 | dd->vld[15].mtu = MAX_MAD_PACKET; |
15045 | /* |
15046 | * Set the initial values to reasonable default, will be set |
15047 | * for real when link is up. |
15048 | */ |
15049 | ppd->overrun_threshold = 0x4; |
15050 | ppd->phy_error_threshold = 0xf; |
15051 | ppd->port_crc_mode_enabled = link_crc_mask; |
15052 | /* initialize supported LTP CRC mode */ |
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15054 | /* initialize enabled LTP CRC mode */ |
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
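		/*
		 * The low nibble (the active LTP CRC mode) presumably stays 0
		 * here, like the other "active" fields, until the link is up.
		 */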
15056 | /* start in offline */ |
15057 | ppd->host_link_state = HLS_DN_OFFLINE; |
15058 | init_vl_arb_caches(ppd); |
15059 | } |
15060 | |
15061 | /* |
15062 | * Do remaining PCIe setup and save PCIe values in dd. |
15063 | * Any error printing is already done by the init code. |
15064 | * On return, we have the chip mapped. |
15065 | */ |
15066 | ret = hfi1_pcie_ddinit(dd, pdev); |
15067 | if (ret < 0) |
15068 | goto bail_free; |
15069 | |
15070 | /* Save PCI space registers to rewrite after device reset */ |
15071 | ret = save_pci_variables(dd); |
15072 | if (ret < 0) |
15073 | goto bail_cleanup; |
15074 | |
15075 | dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) |
15076 | & CCE_REVISION_CHIP_REV_MAJOR_MASK; |
15077 | dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) |
15078 | & CCE_REVISION_CHIP_REV_MINOR_MASK; |
15079 | |
15080 | /* |
15081 | * Check interrupt registers mapping if the driver has no access to |
15082 | * the upstream component. In this case, it is likely that the driver |
15083 | * is running in a VM. |
15084 | */ |
15085 | if (!parent) { |
15086 | ret = check_int_registers(dd); |
15087 | if (ret) |
15088 | goto bail_cleanup; |
15089 | } |
15090 | |
15091 | /* |
15092 | * obtain the hardware ID - NOT related to unit, which is a |
15093 | * software enumeration |
15094 | */ |
15095 | reg = read_csr(dd, CCE_REVISION2); |
15096 | dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) |
15097 | & CCE_REVISION2_HFI_ID_MASK; |
15098 | /* the variable size will remove unwanted bits */ |
15099 | dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; |
15100 | dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; |
15101 | dd_dev_info(dd, "Implementation: %s, revision 0x%x\n" , |
15102 | dd->icode < ARRAY_SIZE(inames) ? |
15103 | inames[dd->icode] : "unknown" , (int)dd->irev); |
15104 | |
15105 | /* speeds the hardware can support */ |
15106 | dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; |
15107 | /* speeds allowed to run at */ |
15108 | dd->pport->link_speed_enabled = dd->pport->link_speed_supported; |
15109 | /* give a reasonable active value, will be set on link up */ |
15110 | dd->pport->link_speed_active = OPA_LINK_SPEED_25G; |
15111 | |
	/* fix up link widths for parallel (_p) emulation */
15113 | ppd = dd->pport; |
15114 | if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { |
15115 | ppd->link_width_supported = |
15116 | ppd->link_width_enabled = |
15117 | ppd->link_width_downgrade_supported = |
15118 | ppd->link_width_downgrade_enabled = |
15119 | OPA_LINK_WIDTH_1X; |
15120 | } |
	/* ensure num_vls isn't larger than the number of sdma engines */
15122 | if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) { |
15123 | dd_dev_err(dd, "num_vls %u too large, using %u VLs\n" , |
15124 | num_vls, sdma_engines); |
15125 | num_vls = sdma_engines; |
15126 | ppd->vls_supported = sdma_engines; |
15127 | ppd->vls_operational = ppd->vls_supported; |
15128 | } |
15129 | |
15130 | /* |
15131 | * Convert the ns parameter to the 64 * cclocks used in the CSR. |
	 * Cap the value if it is larger than the field can hold. If the
	 * timeout is non-zero, the calculated field will be at least 1.
15134 | * |
15135 | * Must be after icode is set up - the cclock rate depends |
15136 | * on knowing the hardware being used. |
15137 | */ |
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15139 | if (dd->rcv_intr_timeout_csr > |
15140 | RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK) |
15141 | dd->rcv_intr_timeout_csr = |
15142 | RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK; |
15143 | else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) |
15144 | dd->rcv_intr_timeout_csr = 1; |
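	/*
	 * Worked example (illustrative, not from the hardware spec): for a
	 * requested timeout of t ns and a cclock period of p ns, the CSR
	 * field is t / (64 * p) rounded down, capped at the RELOAD mask
	 * above, and raised to 1 when t is non-zero.
	 */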
15145 | |
15146 | /* needs to be done before we look for the peer device */ |
15147 | read_guid(dd); |
15148 | |
15149 | /* set up shared ASIC data with peer device */ |
15150 | ret = init_asic_data(dd); |
15151 | if (ret) |
15152 | goto bail_cleanup; |
15153 | |
15154 | /* obtain chip sizes, reset chip CSRs */ |
15155 | ret = init_chip(dd); |
15156 | if (ret) |
15157 | goto bail_cleanup; |
15158 | |
15159 | /* read in the PCIe link speed information */ |
15160 | ret = pcie_speeds(dd); |
15161 | if (ret) |
15162 | goto bail_cleanup; |
15163 | |
15164 | /* call before get_platform_config(), after init_chip_resources() */ |
15165 | ret = eprom_init(dd); |
15166 | if (ret) |
15167 | goto bail_free_rcverr; |
15168 | |
15169 | /* Needs to be called before hfi1_firmware_init */ |
15170 | get_platform_config(dd); |
15171 | |
15172 | /* read in firmware */ |
15173 | ret = hfi1_firmware_init(dd); |
15174 | if (ret) |
15175 | goto bail_cleanup; |
15176 | |
15177 | /* |
15178 | * In general, the PCIe Gen3 transition must occur after the |
15179 | * chip has been idled (so it won't initiate any PCIe transactions |
15180 | * e.g. an interrupt) and before the driver changes any registers |
15181 | * (the transition will reset the registers). |
15182 | * |
15183 | * In particular, place this call after: |
15184 | * - init_chip() - the chip will not initiate any PCIe transactions |
15185 | * - pcie_speeds() - reads the current link speed |
15186 | * - hfi1_firmware_init() - the needed firmware is ready to be |
15187 | * downloaded |
15188 | */ |
15189 | ret = do_pcie_gen3_transition(dd); |
15190 | if (ret) |
15191 | goto bail_cleanup; |
15192 | |
15193 | /* |
15194 | * This should probably occur in hfi1_pcie_init(), but historically |
15195 | * occurs after the do_pcie_gen3_transition() code. |
15196 | */ |
15197 | tune_pcie_caps(dd); |
15198 | |
15199 | /* start setting dd values and adjusting CSRs */ |
15200 | init_early_variables(dd); |
15201 | |
15202 | parse_platform_config(dd); |
15203 | |
15204 | ret = obtain_boardname(dd); |
15205 | if (ret) |
15206 | goto bail_cleanup; |
15207 | |
	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15210 | HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN, |
15211 | (u32)dd->majrev, |
15212 | (u32)dd->minrev, |
15213 | (dd->revision >> CCE_REVISION_SW_SHIFT) |
15214 | & CCE_REVISION_SW_MASK); |
15215 | |
15216 | /* alloc VNIC/AIP rx data */ |
15217 | ret = hfi1_alloc_rx(dd); |
15218 | if (ret) |
15219 | goto bail_cleanup; |
15220 | |
15221 | ret = set_up_context_variables(dd); |
15222 | if (ret) |
15223 | goto bail_cleanup; |
15224 | |
15225 | /* set initial RXE CSRs */ |
15226 | ret = init_rxe(dd); |
15227 | if (ret) |
15228 | goto bail_cleanup; |
15229 | |
15230 | /* set initial TXE CSRs */ |
15231 | init_txe(dd); |
15232 | /* set initial non-RXE, non-TXE CSRs */ |
15233 | init_other(dd); |
15234 | /* set up KDETH QP prefix in both RX and TX CSRs */ |
15235 | init_kdeth_qp(dd); |
15236 | |
15237 | ret = hfi1_dev_affinity_init(dd); |
15238 | if (ret) |
15239 | goto bail_cleanup; |
15240 | |
15241 | /* send contexts must be set up before receive contexts */ |
15242 | ret = init_send_contexts(dd); |
15243 | if (ret) |
15244 | goto bail_cleanup; |
15245 | |
15246 | ret = hfi1_create_kctxts(dd); |
15247 | if (ret) |
15248 | goto bail_cleanup; |
15249 | |
15250 | /* |
15251 | * Initialize aspm, to be done after gen3 transition and setting up |
15252 | * contexts and before enabling interrupts |
15253 | */ |
15254 | aspm_init(dd); |
15255 | |
15256 | ret = init_pervl_scs(dd); |
15257 | if (ret) |
15258 | goto bail_cleanup; |
15259 | |
15260 | /* sdma init */ |
15261 | for (i = 0; i < dd->num_pports; ++i) { |
		ret = sdma_init(dd, i);
15263 | if (ret) |
15264 | goto bail_cleanup; |
15265 | } |
15266 | |
15267 | /* use contexts created by hfi1_create_kctxts */ |
15268 | ret = set_up_interrupts(dd); |
15269 | if (ret) |
15270 | goto bail_cleanup; |
15271 | |
15272 | ret = hfi1_comp_vectors_set_up(dd); |
15273 | if (ret) |
15274 | goto bail_clear_intr; |
15275 | |
15276 | /* set up LCB access - must be after set_up_interrupts() */ |
15277 | init_lcb_access(dd); |
15278 | |
15279 | /* |
15280 | * Serial number is created from the base guid: |
15281 | * [27:24] = base guid [38:35] |
15282 | * [23: 0] = base guid [23: 0] |
15283 | */ |
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15285 | (dd->base_guid & 0xFFFFFF) | |
15286 | ((dd->base_guid >> 11) & 0xF000000)); |
15287 | |
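	/* bytes 7..5 of the EUI-64 base guid form the manufacturer OUI */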
15288 | dd->oui1 = dd->base_guid >> 56 & 0xFF; |
15289 | dd->oui2 = dd->base_guid >> 48 & 0xFF; |
15290 | dd->oui3 = dd->base_guid >> 40 & 0xFF; |
15291 | |
15292 | ret = load_firmware(dd); /* asymmetric with dispose_firmware() */ |
15293 | if (ret) |
15294 | goto bail_clear_intr; |
15295 | |
15296 | thermal_init(dd); |
15297 | |
15298 | ret = init_cntrs(dd); |
15299 | if (ret) |
15300 | goto bail_clear_intr; |
15301 | |
15302 | ret = init_rcverr(dd); |
15303 | if (ret) |
15304 | goto bail_free_cntrs; |
15305 | |
	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	refcount_set(&dd->user_refcount, 1);
15310 | |
15311 | goto bail; |
15312 | |
15313 | bail_free_rcverr: |
15314 | free_rcverr(dd); |
15315 | bail_free_cntrs: |
15316 | free_cntrs(dd); |
15317 | bail_clear_intr: |
15318 | hfi1_comp_vectors_clean_up(dd); |
15319 | msix_clean_up_interrupts(dd); |
15320 | bail_cleanup: |
15321 | hfi1_free_rx(dd); |
15322 | hfi1_pcie_ddcleanup(dd); |
15323 | bail_free: |
15324 | hfi1_free_devdata(dd); |
15325 | bail: |
15326 | return ret; |
15327 | } |
15328 | |
15329 | static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate, |
15330 | u32 dw_len) |
15331 | { |
15332 | u32 delta_cycles; |
15333 | u32 current_egress_rate = ppd->current_egress_rate; |
15334 | /* rates here are in units of 10^6 bits/sec */ |
15335 | |
15336 | if (desired_egress_rate == -1) |
15337 | return 0; /* shouldn't happen */ |
15338 | |
15339 | if (desired_egress_rate >= current_egress_rate) |
		return 0; /* we can't help it go faster, only slower */
15341 | |
	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		egress_cycles(dw_len * 4, current_egress_rate);
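	/* the delta is the extra egress cycles needed at the slower rate */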
15344 | |
15345 | return (u16)delta_cycles; |
15346 | } |
15347 | |
15348 | /** |
15349 | * create_pbc - build a pbc for transmission |
 * @ppd: info for the physical HFI port
 * @flags: special-case flags to OR into the built PBC
 * @srate_mbs: static rate in Mbit/s; 0 disables static rate control
 * @vl: the virtual lane
 * @dw_len: dword length (header words + data words + pbc words)
15355 | * |
15356 | * Create a PBC with the given flags, rate, VL, and length. |
15357 | * |
15358 | * NOTE: The PBC created will not insert any HCRC - all callers but one are |
15359 | * for verbs, which does not use this PSM feature. The lone other caller |
15360 | * is for the diagnostic interface which calls this if the user does not |
15361 | * supply their own PBC. |
15362 | */ |
15363 | u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, |
15364 | u32 dw_len) |
15365 | { |
15366 | u64 pbc, delay = 0; |
15367 | |
15368 | if (unlikely(srate_mbs)) |
		delay = delay_cycles(ppd, srate_mbs, dw_len);
15370 | |
15371 | pbc = flags |
15372 | | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT) |
15373 | | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) |
15374 | | (vl & PBC_VL_MASK) << PBC_VL_SHIFT |
15375 | | (dw_len & PBC_LENGTH_DWS_MASK) |
15376 | << PBC_LENGTH_DWS_SHIFT; |
15377 | |
15378 | return pbc; |
15379 | } |
15380 | |
15381 | #define SBUS_THERMAL 0x4f |
15382 | #define SBUS_THERM_MONITOR_MODE 0x1 |
15383 | |
15384 | #define THERM_FAILURE(dev, ret, reason) \ |
15385 | dd_dev_err((dd), \ |
15386 | "Thermal sensor initialization failed: %s (%d)\n", \ |
15387 | (reason), (ret)) |
15388 | |
15389 | /* |
15390 | * Initialize the thermal sensor. |
15391 | * |
15392 | * After initialization, enable polling of thermal sensor through |
 * SBus interface. For this to work, the SBus Master firmware must
 * be loaded, because the hardware polling logic uses SBus interrupts,
 * which are not supported by the default firmware. Otherwise, no data
 * will be returned through the ASIC_STS_THERM CSR.
15398 | */ |
15399 | static int thermal_init(struct hfi1_devdata *dd) |
15400 | { |
15401 | int ret = 0; |
15402 | |
15403 | if (dd->icode != ICODE_RTL_SILICON || |
15404 | check_chip_resource(dd, CR_THERM_INIT, NULL)) |
15405 | return ret; |
15406 | |
15407 | ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); |
15408 | if (ret) { |
15409 | THERM_FAILURE(dd, ret, "Acquire SBus" ); |
15410 | return ret; |
15411 | } |
15412 | |
15413 | dd_dev_info(dd, "Initializing thermal sensor\n" ); |
15414 | /* Disable polling of thermal readings */ |
15415 | write_csr(dd, ASIC_CFG_THERM_POLL_EN, value: 0x0); |
15416 | msleep(msecs: 100); |
15417 | /* Thermal Sensor Initialization */ |
15418 | /* Step 1: Reset the Thermal SBus Receiver */ |
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
15423 | goto done; |
15424 | } |
15425 | /* Step 2: Set Reset bit in Thermal block */ |
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
15430 | goto done; |
15431 | } |
15432 | /* Step 3: Write clock divider value (100MHz -> 2MHz) */ |
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
15437 | goto done; |
15438 | } |
15439 | /* Step 4: Select temperature mode */ |
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
15445 | goto done; |
15446 | } |
15447 | /* Step 5: De-assert block reset and start conversion */ |
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
15452 | goto done; |
15453 | } |
15454 | /* Step 5.1: Wait for first conversion (21.5ms per spec) */ |
	msleep(22);
15456 | |
15457 | /* Enable polling of thermal readings */ |
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15459 | |
15460 | /* Set initialized flag */ |
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15464 | |
15465 | done: |
15466 | release_chip_resource(dd, CR_SBUS); |
15467 | return ret; |
15468 | } |
15469 | |
15470 | static void handle_temp_err(struct hfi1_devdata *dd) |
15471 | { |
15472 | struct hfi1_pportdata *ppd = &dd->pport[0]; |
15473 | /* |
15474 | * Thermal Critical Interrupt |
15475 | * Put the device into forced freeze mode, take link down to |
15476 | * offline, and put DC into reset. |
15477 | */ |
15478 | dd_dev_emerg(dd, |
15479 | "Critical temperature reached! Forcing device into freeze mode!\n" ); |
15480 | dd->flags |= HFI1_FORCED_FREEZE; |
15481 | start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); |
15482 | /* |
15483 | * Shut DC down as much and as quickly as possible. |
15484 | * |
15485 | * Step 1: Take the link down to OFFLINE. This will cause the |
15486 | * 8051 to put the Serdes in reset. However, we don't want to |
15487 | * go through the entire link state machine since we want to |
 * shut down ASAP. Furthermore, this is not a graceful shutdown
15489 | * but rather an attempt to save the chip. |
15490 | * Code below is almost the same as quiet_serdes() but avoids |
15491 | * all the extra work and the sleeps. |
15492 | */ |
15493 | ppd->driver_link_ready = 0; |
15494 | ppd->link_enabled = 0; |
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15496 | PLS_OFFLINE); |
15497 | /* |
15498 | * Step 2: Shutdown LCB and 8051 |
15499 | * After shutdown, do not restore DC_CFG_RESET value. |
15500 | */ |
15501 | dc_shutdown(dd); |
15502 | } |
15503 | |