// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc

#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT		16
#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS		0xffff
#define QM_MB_CMD_DATA_SHIFT		32
#define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
#define QM_MB_STATUS_MASK		GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8
#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc).w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc).w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17
#define QM_AEQE_TYPE_MASK		0xf
#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
#define QM_CQ_OVERFLOW			0
#define QM_EQ_OVERFLOW			1
#define QM_CQE_ERROR			2

#define QM_XQ_DEPTH_SHIFT		16
#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_PAGE_SIZE			0x0034
#define QM_QP_DB_INTERVAL		0x10000
#define QM_DB_TIMEOUT_CFG		0x100074
#define QM_DB_TIMEOUT_SET		0x1fffff

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054
#define QM_PM_CTRL			0x100148
#define QM_IDLE_DISABLE			BIT(9)

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_FIFO_OVERFLOW_QP_SHIFT	16
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_DB_TIMEOUT_QP_SHIFT		16
#define QM_ABNORMAL_INF02		0x100018
#define QM_AXI_POISON_ERR		BIT(22)
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_OOO_SHUTDOWN_SEL		0x1040f8
#define QM_AXI_RRESP_ERR		BIT(0)
#define QM_ECC_MBIT			BIT(2)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)

#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define QM_PEH_DFX_INFO1		0x100100
#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
#define QM_MSI_CAP_ENABLE		BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS		0x100128
#define QM_IFC_INT_SET_P		0x100130
#define QM_IFC_INT_CFG			0x100134
#define QM_IFC_INT_SOURCE_P		0x100138
#define QM_IFC_INT_SOURCE_V		0x0020
#define QM_IFC_INT_MASK			0x0024
#define QM_IFC_INT_STATUS		0x0028
#define QM_IFC_INT_SET_V		0x002C
#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK		BIT(0)
#define QM_IFC_INT_DISABLE		BIT(0)
#define QM_IFC_INT_STATUS_MASK		BIT(0)
#define QM_IFC_INT_SET_MASK		BIT(0)
#define QM_WAIT_DST_ACK			10
#define QM_MAX_PF_WAIT_COUNT		10
#define QM_MAX_VF_WAIT_COUNT		40
#define QM_VF_RESET_WAIT_US		20000
#define QM_VF_RESET_WAIT_CNT		3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208
#define QM_FUNC_CAPS_REG		0x3100
#define QM_CAPBILITY_VERSION		GENMASK(7, 0)

#define PCI_BAR_2			2
#define PCI_BAR_4			4
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN			256
#define QM_PCI_COMMAND_INVALID		~0
#define QM_RESET_STOP_TX_OFFSET		1
#define QM_RESET_STOP_RX_OFFSET		2

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10

#define QM_QOS_PARAM_NUM		2
#define QM_QOS_MAX_VAL			1000
#define QM_QOS_RATE			100
#define QM_QOS_EXPAND_RATE		1000
#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B			1
#define QM_SHAPER_VFT_OFFSET		6
#define QM_QOS_MIN_ERROR_RATE		5
#define QM_SHAPER_MIN_CBS_S		8
#define QM_QOS_TICK			0x300U
#define QM_QOS_DIVISOR_CLK		0x1f40U
#define QM_QOS_MAX_CIR_B		200
#define QM_QOS_MIN_CIR_B		100
#define QM_QOS_MAX_CIR_U		6
#define QM_AUTOSUSPEND_DELAY		3000

#define QM_DEV_ALG_MAX_LEN		256

/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL		1
#define QM_DUMP_SQC_FAIL		3
#define QM_DUMP_CQC_FAIL		4
#define QM_FINISH_WAIT			5

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
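
/*
 * Worked example (illustrative): for a 128-byte SQE and an SQ depth of
 * 1024, QM_MK_SQC_DW3_V2(128, 1024) evaluates to
 * (1024 - 1) | (ilog2(128) << 12) = 0x3ff | 0x7000 = 0x73ff.
 */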

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

enum qm_pre_store_cap_idx {
	QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
	QM_AEQ_IRQ_TYPE_CAP_IDX,
	QM_ABN_IRQ_TYPE_CAP_IDX,
	QM_PF2VF_IRQ_TYPE_CAP_IDX,
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
	{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
	{QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
	{QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
	{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};

static const u32 qm_pre_store_caps[] = {
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

/**
 * struct qm_hw_err - Structure describing the device errors
 * @list: hardware error list
 * @timestamp: timestamp when the error occurred
 */
struct qm_hw_err {
	struct list_head list;
	unsigned long long timestamp;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* The QoS step is 100. */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100, 100, 4},
	{200, 200, 3},
	{300, 500, 2},
	{600, 1000, 1},
	{1100, 100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100, 200, 9},
	{300, 500, 11},
	{600, 1000, 12},
	{1100, 10000, 16},
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master OOO to be blocked. */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * On Kunpeng920, the PF and the VFs on the host do not support
	 * resetting at the same time.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* Return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout. */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bits should be written to the hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;

#if IS_ENABLED(CONFIG_ARM64)
	unsigned long tmp0 = 0, tmp1 = 0;
#endif

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

#if IS_ENABLED(CONFIG_ARM64)
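	/*
	 * A note on the existing asm (no behavior change): ldp/stp moves all
	 * 128 bits of the mailbox in one instruction pair, so the doorbell
	 * reaches the hardware as a single bus transaction; "dmb oshst"
	 * then orders the store against later writes.
	 */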
	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
#endif
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
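
/*
 * Usage sketch (illustrative, not a caller in this file): issuing a
 * read-direction mailbox that dumps the SQC of queue 0 into a
 * caller-owned DMA buffer at sqc_dma:
 *
 *	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, 0, true);
 */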

/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct qm_mailbox mailbox;
	dma_addr_t xqc_dma;
	void *tmp_xqc;
	size_t size;
	int ret;

	switch (cmd) {
	case QM_MB_CMD_SQC:
		size = sizeof(struct qm_sqc);
		tmp_xqc = qm->xqc_buf.sqc;
		xqc_dma = qm->xqc_buf.sqc_dma;
		break;
	case QM_MB_CMD_CQC:
		size = sizeof(struct qm_cqc);
		tmp_xqc = qm->xqc_buf.cqc;
		xqc_dma = qm->xqc_buf.cqc_dma;
		break;
	case QM_MB_CMD_EQC:
		size = sizeof(struct qm_eqc);
		tmp_xqc = qm->xqc_buf.eqc;
		xqc_dma = qm->xqc_buf.eqc_dma;
		break;
	case QM_MB_CMD_AEQC:
		size = sizeof(struct qm_aeqc);
		tmp_xqc = qm->xqc_buf.aeqc;
		xqc_dma = qm->xqc_buf.aeqc_dma;
		break;
	default:
		/* Guard against an unhandled cmd leaving tmp_xqc uninitialized. */
		dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
		return -EINVAL;
	}

	/* Setting xqc will fail if master OOO is blocked. */
	if (qm_check_dev_error(pf_qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
		return -EIO;
	}

	mutex_lock(&qm->mailbox_lock);
	if (!op)
		memcpy(tmp_xqc, xqc, size);

	qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
	ret = qm_mb_nolock(qm, &mailbox);
	if (!ret && op)
		memcpy(xqc, tmp_xqc, size);

	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
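
/*
 * Usage sketch (illustrative): reading back the CQC of qp 0 through the
 * pre-allocated bounce buffer, so the caller does not manage DMA itself:
 *
 *	struct qm_cqc cqc;
 *	int ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, 0, 1);
 */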

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* If the qm enables clock gating on Kunpeng930, QoS will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether to read from the register; 0 means reading from the
 *	     register is not supported.
 *
 * This function returns the device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
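
/*
 * Usage sketch (mirrors qm_get_irq_num() below): reading the PF IRQ
 * number capability, with the per-version default returned when the
 * register cannot be read:
 *
 *	u32 irqs = hisi_qm_get_hw_info(qm, qm_basic_info,
 *				       QM_PF_IRQ_NUM_CAP, qm->cap_ver);
 */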

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
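
/*
 * Worked example: the QM_XEQ_DEPTH_CAP default of 0x4000800 (QM_HW_V2 and
 * later in qm_basic_info) splits into a low half of 0x800 (2048) and a
 * high half of 0x400 (1024).
 */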

int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size)
{
	struct device *dev = &qm->pdev->dev;
	char *algs, *ptr;
	int i;

	if (!qm->uacce)
		return 0;

	if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
		dev_err(dev, "algs size %u is equal to or larger than %d.\n",
			dev_algs_size, QM_DEV_ALG_MAX_LEN);
		return -EINVAL;
	}

	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
	if (!algs)
		return -ENOMEM;

	for (i = 0; i < dev_algs_size; i++)
		if (alg_msk & dev_algs[i].alg_msk)
			strcat(algs, dev_algs[i].alg);

	ptr = strrchr(algs, '\n');
	if (ptr) {
		*ptr = '\0';
		qm->uacce->algs = algs;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_set_algs);

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

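/*
 * Drain completions on one qp: every CQE whose phase bit still matches
 * cqc_phase is owned by software, so req_cb is invoked for the matching
 * SQE and the CQ head doorbell is rung; a final doorbell with priority 1
 * re-arms the c_flag.
 */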
static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);

		cond_resched();
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	u16 eqe_num = poll_data->eqe_num;
	struct hisi_qp *qp;
	int i;

	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data = NULL;
	u16 eq_depth = qm->eq_depth;
	u16 cqn, eqe_num = 0;

	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		return;
	}

	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
	if (unlikely(cqn >= qm->qp_num))
		return;
	poll_data = &qm->poll_data[cqn];

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	poll_data->eqe_num = eqe_num;
	queue_work(qm->wq, &poll_data->work);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	/* Get qp id of completed tasks and re-enable the interrupt */
	qm_get_complete_eqe_num(qm);

	return IRQ_HANDLED;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
		dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
		return IRQ_HANDLED;
	}

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

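/*
 * For user space queues, the last two 32-bit words of the queue's DMA
 * region double as stop flags (TX/RX offsets counted back from the end);
 * writing 1 there tells user space that the qp is being disabled across
 * a reset. This reflects a reading of the code, not a documented ABI.
 */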
static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
			QM_AEQE_TYPE_MASK;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size %lu is not supported, default to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * acc_shaper_para_calc() gets the IR value from the QoS formula; the return
 * value is the expected QoS.
 * The formula:
 *	IR = X Mbps: ir = 1 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps
 *
 *		IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = -------------------------
 *		  Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}
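
/*
 * Worked example: with QM_QOS_TICK = 0x300 (768) and QM_QOS_DIVISOR_CLK =
 * 0x1f40 (8000), acc_shaper_para_calc(192, 0, 1) returns
 * (192 * 8000 * 1) / (768 * 2) = 1000.
 */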

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}
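
/*
 * Worked example: for a target ir of 1000, acc_shaper_calc_cir_s() picks
 * cir_s = 1 (the 600..1000 row), and the search above lands on cir_b = 192,
 * cir_u = 0, since 192 * 8000 / (768 * 2) hits 1000 exactly (error rate 0,
 * within QM_QOS_MIN_ERROR_RATE).
 */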

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

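/*
 * Programming one VFT entry, as done below: wait for QM_VFT_CFG_RDY,
 * select a write operation and the table type, address the function
 * (plus the alg-type base for shaper entries), load the 64-bit payload,
 * then pulse QM_VFT_CFG_OP_ENABLE and wait for ready again.
 */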
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The queue base is reused for different alg types. */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++)
		qm_set_vft_common(qm, i, fun_num, 0, 0);

	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_V2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* Enable shutdown of master OOO when a hardware error occurs. */
	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* Disable shutdown of master OOO when a hardware error occurs. */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num, qp_id;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
			dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
				qm_db_timeout[type], vf_num, qp_id);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
			qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
					qm_fifo_overflow[type], vf_num, qp_id);
			else
				dev_err(dev, "unknown error type\n");
		} else if (err->int_msk & QM_AXI_RRESP_ERR) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
			if (reg_val & QM_AXI_POISON_ERR)
				dev_err(dev, "qm axi poison error happened\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

	/* read err sts */
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status & qm->err_info.qm_reset_mask)
			return ACC_ERR_NEED_RESET;

		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* Break once all VFs have sent their commands to the PF. */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* The PF checks each VF's message. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) did not ping the PF!\n", i);
	}

	/* The PF clears the interrupt to ack the VFs. */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

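/*
 * PF-to-VF ping, as implemented below: the command travels by mailbox,
 * the VF is then kicked through the IFC interrupt, and the PF polls
 * QM_IFC_READY_STATUS until the VF's ready bit clears or the wait count
 * runs out.
 */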
1569 | static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) |
1570 | { |
1571 | struct device *dev = &qm->pdev->dev; |
1572 | struct qm_mailbox mailbox; |
1573 | int cnt = 0; |
1574 | u64 val; |
1575 | int ret; |
1576 | |
1577 | qm_mb_pre_init(mailbox: &mailbox, QM_MB_CMD_SRC, base: cmd, queue: fun_num, op: 0); |
1578 | mutex_lock(&qm->mailbox_lock); |
1579 | ret = qm_mb_nolock(qm, mailbox: &mailbox); |
1580 | if (ret) { |
1581 | dev_err(dev, "failed to send command to vf(%u)!\n" , fun_num); |
1582 | goto err_unlock; |
1583 | } |
1584 | |
1585 | qm_trigger_vf_interrupt(qm, fun_num); |
1586 | while (true) { |
1587 | msleep(QM_WAIT_DST_ACK); |
1588 | val = readq(addr: qm->io_base + QM_IFC_READY_STATUS); |
1589 | /* if VF respond, PF notifies VF successfully. */ |
1590 | if (!(val & BIT(fun_num))) |
1591 | goto err_unlock; |
1592 | |
1593 | if (++cnt > QM_MAX_PF_WAIT_COUNT) { |
1594 | dev_err(dev, "failed to get response from VF(%u)!\n" , fun_num); |
1595 | ret = -ETIMEDOUT; |
1596 | break; |
1597 | } |
1598 | } |
1599 | |
1600 | err_unlock: |
1601 | mutex_unlock(lock: &qm->mailbox_lock); |
1602 | return ret; |
1603 | } |
1604 | |
1605 | static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) |
1606 | { |
1607 | struct device *dev = &qm->pdev->dev; |
1608 | u32 vfs_num = qm->vfs_num; |
1609 | struct qm_mailbox mailbox; |
1610 | u64 val = 0; |
1611 | int cnt = 0; |
1612 | int ret; |
1613 | u32 i; |
1614 | |
1615 | qm_mb_pre_init(mailbox: &mailbox, QM_MB_CMD_SRC, base: cmd, QM_MB_PING_ALL_VFS, op: 0); |
1616 | mutex_lock(&qm->mailbox_lock); |
1617 | /* PF sends command to all VFs by mailbox */ |
1618 | ret = qm_mb_nolock(qm, mailbox: &mailbox); |
1619 | if (ret) { |
1620 | dev_err(dev, "failed to send command to VFs!\n" ); |
1621 | mutex_unlock(lock: &qm->mailbox_lock); |
1622 | return ret; |
1623 | } |
1624 | |
1625 | qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); |
1626 | while (true) { |
1627 | msleep(QM_WAIT_DST_ACK); |
1628 | val = readq(addr: qm->io_base + QM_IFC_READY_STATUS); |
1629 | /* If all VFs acked, PF notifies VFs successfully. */ |
1630 | if (!(val & GENMASK(vfs_num, 1))) { |
1631 | mutex_unlock(lock: &qm->mailbox_lock); |
1632 | return 0; |
1633 | } |
1634 | |
1635 | if (++cnt > QM_MAX_PF_WAIT_COUNT) |
1636 | break; |
1637 | } |
1638 | |
1639 | mutex_unlock(lock: &qm->mailbox_lock); |
1640 | |
1641 | /* Check which vf respond timeout. */ |
1642 | for (i = 1; i <= vfs_num; i++) { |
1643 | if (val & BIT(i)) |
1644 | dev_err(dev, "failed to get response from VF(%u)!\n" , i); |
1645 | } |
1646 | |
1647 | return -ETIMEDOUT; |
1648 | } |
1649 | |
1650 | static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) |
1651 | { |
1652 | struct qm_mailbox mailbox; |
1653 | int cnt = 0; |
1654 | u32 val; |
1655 | int ret; |
1656 | |
1657 | qm_mb_pre_init(mailbox: &mailbox, QM_MB_CMD_SRC, base: cmd, queue: 0, op: 0); |
1658 | mutex_lock(&qm->mailbox_lock); |
1659 | ret = qm_mb_nolock(qm, mailbox: &mailbox); |
1660 | if (ret) { |
1661 | dev_err(&qm->pdev->dev, "failed to send command to PF!\n" ); |
1662 | goto unlock; |
1663 | } |
1664 | |
1665 | qm_trigger_pf_interrupt(qm); |
1666 | /* Waiting for PF response */ |
1667 | while (true) { |
1668 | msleep(QM_WAIT_DST_ACK); |
1669 | val = readl(addr: qm->io_base + QM_IFC_INT_SET_V); |
1670 | if (!(val & QM_IFC_INT_STATUS_MASK)) |
1671 | break; |
1672 | |
1673 | if (++cnt > QM_MAX_VF_WAIT_COUNT) { |
1674 | ret = -ETIMEDOUT; |
1675 | break; |
1676 | } |
1677 | } |
1678 | |
1679 | unlock: |
1680 | mutex_unlock(lock: &qm->mailbox_lock); |
1681 | return ret; |
1682 | } |
1683 | |
1684 | static int qm_drain_qm(struct hisi_qm *qm) |
1685 | { |
1686 | return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); |
1687 | } |
1688 | |
1689 | static int qm_stop_qp(struct hisi_qp *qp) |
1690 | { |
1691 | return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); |
1692 | } |
1693 | |
1694 | static int qm_set_msi(struct hisi_qm *qm, bool set) |
1695 | { |
1696 | struct pci_dev *pdev = qm->pdev; |
1697 | |
1698 | if (set) { |
1699 | pci_write_config_dword(dev: pdev, where: pdev->msi_cap + PCI_MSI_MASK_64, |
1700 | val: 0); |
1701 | } else { |
1702 | pci_write_config_dword(dev: pdev, where: pdev->msi_cap + PCI_MSI_MASK_64, |
1703 | ACC_PEH_MSI_DISABLE); |
1704 | if (qm->err_status.is_qm_ecc_mbit || |
1705 | qm->err_status.is_dev_ecc_mbit) |
1706 | return 0; |
1707 | |
1708 | mdelay(1); |
1709 | if (readl(addr: qm->io_base + QM_PEH_DFX_INFO0)) |
1710 | return -EFAULT; |
1711 | } |
1712 | |
1713 | return 0; |
1714 | } |
1715 | |
1716 | static void qm_wait_msi_finish(struct hisi_qm *qm) |
1717 | { |
1718 | struct pci_dev *pdev = qm->pdev; |
1719 | u32 cmd = ~0; |
1720 | int cnt = 0; |
1721 | u32 val; |
1722 | int ret; |
1723 | |
1724 | while (true) { |
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
1727 | if (!cmd) |
1728 | break; |
1729 | |
1730 | if (++cnt > MAX_WAIT_COUNTS) { |
1731 | pci_warn(pdev, "failed to empty MSI PENDING!\n" ); |
1732 | break; |
1733 | } |
1734 | |
1735 | udelay(1); |
1736 | } |
1737 | |
1738 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, |
1739 | val, !(val & QM_PEH_DFX_MASK), |
1740 | POLL_PERIOD, POLL_TIMEOUT); |
1741 | if (ret) |
1742 | pci_warn(pdev, "failed to empty PEH MSI!\n" ); |
1743 | |
1744 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, |
1745 | val, !(val & QM_PEH_MSI_FINISH_MASK), |
1746 | POLL_PERIOD, POLL_TIMEOUT); |
1747 | if (ret) |
1748 | pci_warn(pdev, "failed to finish MSI operation!\n" ); |
1749 | } |
1750 | |
1751 | static int qm_set_msi_v3(struct hisi_qm *qm, bool set) |
1752 | { |
1753 | struct pci_dev *pdev = qm->pdev; |
1754 | int ret = -ETIMEDOUT; |
1755 | u32 cmd, i; |
1756 | |
	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1758 | if (set) |
1759 | cmd |= QM_MSI_CAP_ENABLE; |
1760 | else |
1761 | cmd &= ~QM_MSI_CAP_ENABLE; |
1762 | |
	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
1764 | if (set) { |
1765 | for (i = 0; i < MAX_WAIT_COUNTS; i++) { |
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1767 | if (cmd & QM_MSI_CAP_ENABLE) |
1768 | return 0; |
1769 | |
1770 | udelay(1); |
1771 | } |
1772 | } else { |
1773 | udelay(WAIT_PERIOD_US_MIN); |
1774 | qm_wait_msi_finish(qm); |
1775 | ret = 0; |
1776 | } |
1777 | |
1778 | return ret; |
1779 | } |
1780 | |
1781 | static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { |
1782 | .qm_db = qm_db_v1, |
1783 | .hw_error_init = qm_hw_error_init_v1, |
1784 | .set_msi = qm_set_msi, |
1785 | }; |
1786 | |
1787 | static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { |
1788 | .get_vft = qm_get_vft_v2, |
1789 | .qm_db = qm_db_v2, |
1790 | .hw_error_init = qm_hw_error_init_v2, |
1791 | .hw_error_uninit = qm_hw_error_uninit_v2, |
1792 | .hw_error_handle = qm_hw_error_handle_v2, |
1793 | .set_msi = qm_set_msi, |
1794 | }; |
1795 | |
1796 | static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { |
1797 | .get_vft = qm_get_vft_v2, |
1798 | .qm_db = qm_db_v2, |
1799 | .hw_error_init = qm_hw_error_init_v3, |
1800 | .hw_error_uninit = qm_hw_error_uninit_v3, |
1801 | .hw_error_handle = qm_hw_error_handle_v2, |
1802 | .set_msi = qm_set_msi_v3, |
1803 | }; |
1804 | |
1805 | static void *qm_get_avail_sqe(struct hisi_qp *qp) |
1806 | { |
1807 | struct hisi_qp_status *qp_status = &qp->qp_status; |
1808 | u16 sq_tail = qp_status->sq_tail; |
1809 | |
1810 | if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) |
1811 | return NULL; |
1812 | |
1813 | return qp->sqe + sq_tail * qp->qm->sqe_size; |
1814 | } |
1815 | |
1816 | static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) |
1817 | { |
1818 | u64 *addr; |
1819 | |
1820 | /* Use last 64 bits of DUS to reset status. */ |
1821 | addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; |
1822 | *addr = 0; |
1823 | } |
1824 | |
1825 | static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) |
1826 | { |
1827 | struct device *dev = &qm->pdev->dev; |
1828 | struct hisi_qp *qp; |
1829 | int qp_id; |
1830 | |
	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n");
		return ERR_PTR(-EPERM);
	}

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
1849 | } |
1850 | |
1851 | qp = &qm->qp_array[qp_id]; |
1852 | hisi_qm_unset_hw_reset(qp); |
1853 | memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); |
1854 | |
1855 | qp->event_cb = NULL; |
1856 | qp->req_cb = NULL; |
1857 | qp->qp_id = qp_id; |
1858 | qp->alg_type = alg_type; |
1859 | qp->is_in_kernel = true; |
1860 | qm->qp_in_used++; |
1861 | |
1862 | return qp; |
1863 | } |
1864 | |
1865 | /** |
1866 | * hisi_qm_create_qp() - Create a queue pair from qm. |
1867 | * @qm: The qm we create a qp from. |
1868 | * @alg_type: Accelerator specific algorithm type in sqc. |
1869 | * |
1870 | * Return created qp, negative error code if failed. |
1871 | */ |
1872 | static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) |
1873 | { |
1874 | struct hisi_qp *qp; |
1875 | int ret; |
1876 | |
1877 | ret = qm_pm_get_sync(qm); |
1878 | if (ret) |
		return ERR_PTR(ret);

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	if (IS_ERR(qp))
1886 | qm_pm_put_sync(qm); |
1887 | |
1888 | return qp; |
1889 | } |
1890 | |
1891 | /** |
1892 | * hisi_qm_release_qp() - Release a qp back to its qm. |
1893 | * @qp: The qp we want to release. |
1894 | * |
1895 | * This function releases the resource of a qp. |
1896 | */ |
1897 | static void hisi_qm_release_qp(struct hisi_qp *qp) |
1898 | { |
1899 | struct hisi_qm *qm = qp->qm; |
1900 | |
	down_write(&qm->qps_lock);

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);
1907 | |
1908 | qm_pm_put_sync(qm); |
1909 | } |
1910 | |
1911 | static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) |
1912 | { |
1913 | struct hisi_qm *qm = qp->qm; |
1914 | enum qm_hw_ver ver = qm->ver; |
1915 | struct qm_sqc sqc = {0}; |
1916 | |
1917 | if (ver == QM_HW_V1) { |
1918 | sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); |
1919 | sqc.w8 = cpu_to_le16(qp->sq_depth - 1); |
1920 | } else { |
1921 | sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); |
1922 | sqc.w8 = 0; /* rand_qc */ |
1923 | } |
1924 | sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); |
1925 | sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); |
1926 | sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); |
1927 | sqc.cq_num = cpu_to_le16(qp_id); |
1928 | sqc.pasid = cpu_to_le16(pasid); |
1929 | |
1930 | if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) |
1931 | sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE << |
1932 | QM_QC_PASID_ENABLE_SHIFT); |
1933 | |
	return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
1935 | } |
1936 | |
1937 | static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) |
1938 | { |
1939 | struct hisi_qm *qm = qp->qm; |
1940 | enum qm_hw_ver ver = qm->ver; |
1941 | struct qm_cqc cqc = {0}; |
1942 | |
1943 | if (ver == QM_HW_V1) { |
1944 | cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); |
1945 | cqc.w8 = cpu_to_le16(qp->cq_depth - 1); |
1946 | } else { |
1947 | cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); |
1948 | cqc.w8 = 0; /* rand_qc */ |
1949 | } |
1950 | /* |
1951 | * Enable request finishing interrupts defaultly. |
1952 | * So, there will be some interrupts until disabling |
1953 | * this. |
1954 | */ |
1955 | cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); |
1956 | cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); |
1957 | cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); |
1958 | cqc.pasid = cpu_to_le16(pasid); |
1959 | |
1960 | if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) |
1961 | cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE); |
1962 | |
	return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
1964 | } |
1965 | |
1966 | static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) |
1967 | { |
1968 | int ret; |
1969 | |
1970 | qm_init_qp_status(qp); |
1971 | |
1972 | ret = qm_sq_ctx_cfg(qp, qp_id, pasid); |
1973 | if (ret) |
1974 | return ret; |
1975 | |
1976 | return qm_cq_ctx_cfg(qp, qp_id, pasid); |
1977 | } |
1978 | |
1979 | static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) |
1980 | { |
1981 | struct hisi_qm *qm = qp->qm; |
1982 | struct device *dev = &qm->pdev->dev; |
1983 | int qp_id = qp->qp_id; |
1984 | u32 pasid = arg; |
1985 | int ret; |
1986 | |
	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to start qp as qm is stopped!\n");
1989 | return -EPERM; |
1990 | } |
1991 | |
1992 | ret = qm_qp_ctx_cfg(qp, qp_id, pasid); |
1993 | if (ret) |
1994 | return ret; |
1995 | |
	atomic_set(&qp->qp_status.flags, QP_START);
	dev_dbg(dev, "queue %d started\n", qp_id);
1998 | |
1999 | return 0; |
2000 | } |
2001 | |
2002 | /** |
2003 | * hisi_qm_start_qp() - Start a qp into running. |
2004 | * @qp: The qp we want to start to run. |
2005 | * @arg: Accelerator specific argument. |
2006 | * |
2007 | * After this function, qp can receive request from user. Return 0 if |
2008 | * successful, negative error code if failed. |
2009 | */ |
2010 | int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) |
2011 | { |
2012 | struct hisi_qm *qm = qp->qm; |
2013 | int ret; |
2014 | |
	down_write(&qm->qps_lock);
	ret = qm_start_qp_nolock(qp, arg);
	up_write(&qm->qps_lock);
2018 | |
2019 | return ret; |
2020 | } |
2021 | EXPORT_SYMBOL_GPL(hisi_qm_start_qp); |
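
/*
 * A minimal usage sketch (illustrative, not part of this file): an
 * accelerator driver typically obtains qps via hisi_qm_alloc_qps_node()
 * and then starts each one. For in-kernel qps the accelerator-specific
 * @arg (the pasid) is usually 0:
 *
 *	ret = hisi_qm_start_qp(qp, 0);
 *	if (ret)
 *		return ret;
 */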
2022 | |
2023 | /** |
2024 | * qp_stop_fail_cb() - call request cb. |
 * @qp: The qp that failed to stop.
 *
 * The request callback should be invoked for each task, whether it completed
 * or not.
2028 | */ |
2029 | static void qp_stop_fail_cb(struct hisi_qp *qp) |
2030 | { |
	int qp_used = atomic_read(&qp->qp_status.used);
2032 | u16 cur_tail = qp->qp_status.sq_tail; |
2033 | u16 sq_depth = qp->sq_depth; |
2034 | u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; |
2035 | struct hisi_qm *qm = qp->qm; |
2036 | u16 pos; |
2037 | int i; |
2038 | |
2039 | for (i = 0; i < qp_used; i++) { |
2040 | pos = (i + cur_head) % sq_depth; |
2041 | qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); |
		atomic_dec(&qp->qp_status.used);
2043 | } |
2044 | } |
2045 | |
2046 | static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) |
2047 | { |
2048 | struct device *dev = &qm->pdev->dev; |
2049 | struct qm_sqc sqc; |
2050 | struct qm_cqc cqc; |
2051 | int ret, i = 0; |
2052 | |
2053 | while (++i) { |
		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
			*state = QM_DUMP_SQC_FAIL;
			return ret;
		}

		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2064 | *state = QM_DUMP_CQC_FAIL; |
2065 | return ret; |
2066 | } |
2067 | |
2068 | if ((sqc.tail == cqc.tail) && |
2069 | (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) |
2070 | break; |
2071 | |
2072 | if (i == MAX_WAIT_COUNTS) { |
2073 | dev_err(dev, "Fail to empty queue %u!\n" , qp_id); |
2074 | *state = QM_STOP_QUEUE_FAIL; |
2075 | return -ETIMEDOUT; |
2076 | } |
2077 | |
2078 | usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); |
2079 | } |
2080 | |
2081 | return 0; |
2082 | } |
2083 | |
2084 | /** |
2085 | * qm_drain_qp() - Drain a qp. |
2086 | * @qp: The qp we want to drain. |
2087 | * |
2088 | * If the device does not support stopping queue by sending mailbox, |
2089 | * determine whether the queue is cleared by judging the tail pointers of |
2090 | * sq and cq. |
2091 | */ |
2092 | static int qm_drain_qp(struct hisi_qp *qp) |
2093 | { |
2094 | struct hisi_qm *qm = qp->qm; |
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	u32 state = 0;
	int ret;

	/* No need to judge if master OOO is blocked. */
	if (qm_check_dev_error(pf_qm))
2101 | return 0; |
2102 | |
2103 | /* HW V3 supports drain qp by device */ |
2104 | if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { |
2105 | ret = qm_stop_qp(qp); |
2106 | if (ret) { |
2107 | dev_err(&qm->pdev->dev, "Failed to stop qp!\n" ); |
2108 | state = QM_STOP_QUEUE_FAIL; |
2109 | goto set_dev_state; |
2110 | } |
2111 | return ret; |
2112 | } |
2113 | |
	ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
2115 | if (ret) |
2116 | goto set_dev_state; |
2117 | |
2118 | return 0; |
2119 | |
2120 | set_dev_state: |
2121 | if (qm->debug.dev_dfx.dev_timeout) |
2122 | qm->debug.dev_dfx.dev_state = state; |
2123 | |
2124 | return ret; |
2125 | } |
2126 | |
2127 | static void qm_stop_qp_nolock(struct hisi_qp *qp) |
2128 | { |
2129 | struct hisi_qm *qm = qp->qm; |
2130 | struct device *dev = &qm->pdev->dev; |
2131 | int ret; |
2132 | |
2133 | /* |
2134 | * It is allowed to stop and release qp when reset, If the qp is |
2135 | * stopped when reset but still want to be released then, the |
2136 | * is_resetting flag should be set negative so that this qp will not |
2137 | * be restarted after reset. |
2138 | */ |
	if (atomic_read(&qp->qp_status.flags) != QP_START) {
		qp->is_resetting = false;
		return;
	}

	atomic_set(&qp->qp_status.flags, QP_STOP);
2145 | |
	/* V3 supports stopping a qp directly when preparing for FLR */
2147 | if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) { |
2148 | ret = qm_drain_qp(qp); |
2149 | if (ret) |
2150 | dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n" , qp->qp_id); |
2151 | } |
2152 | |
2153 | flush_workqueue(qm->wq); |
2154 | if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) |
2155 | qp_stop_fail_cb(qp); |
2156 | |
2157 | dev_dbg(dev, "stop queue %u!" , qp->qp_id); |
2158 | } |
2159 | |
2160 | /** |
2161 | * hisi_qm_stop_qp() - Stop a qp in qm. |
2162 | * @qp: The qp we want to stop. |
2163 | * |
2164 | * This function is reverse of hisi_qm_start_qp. |
2165 | */ |
2166 | void hisi_qm_stop_qp(struct hisi_qp *qp) |
2167 | { |
	down_write(&qp->qm->qps_lock);
	qm_stop_qp_nolock(qp);
	up_write(&qp->qm->qps_lock);
2171 | } |
2172 | EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); |
2173 | |
2174 | /** |
2175 | * hisi_qp_send() - Queue up a task in the hardware queue. |
2176 | * @qp: The qp in which to put the message. |
2177 | * @msg: The message. |
2178 | * |
2179 | * This function will return -EBUSY if qp is currently full, and -EAGAIN |
2180 | * if qp related qm is resetting. |
2181 | * |
 * Note: This function may run concurrently with qm_irq_thread and ACC reset.
 * It has no race with qm_irq_thread. However, an ACC reset may happen during
 * hisi_qp_send; for performance reasons no lock is taken here. The reset can
 * make the current qm_db fail to send, or leave a sent sqe unreceived. The QM
 * sync/async receive functions should handle such error sqes, and the ACC
 * reset-done function should clear used sqes to 0.
2188 | */ |
2189 | int hisi_qp_send(struct hisi_qp *qp, const void *msg) |
2190 | { |
2191 | struct hisi_qp_status *qp_status = &qp->qp_status; |
2192 | u16 sq_tail = qp_status->sq_tail; |
2193 | u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; |
2194 | void *sqe = qm_get_avail_sqe(qp); |
2195 | |
2196 | if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || |
2197 | atomic_read(&qp->qm->status.flags) == QM_STOP || |
2198 | qp->is_resetting)) { |
		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2200 | return -EAGAIN; |
2201 | } |
2202 | |
2203 | if (!sqe) |
2204 | return -EBUSY; |
2205 | |
2206 | memcpy(sqe, msg, qp->qm->sqe_size); |
2207 | |
	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
	atomic_inc(&qp->qp_status.used);
2210 | qp_status->sq_tail = sq_tail_next; |
2211 | |
2212 | return 0; |
2213 | } |
2214 | EXPORT_SYMBOL_GPL(hisi_qp_send); |
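
/*
 * A minimal usage sketch (illustrative): the caller builds a message of
 * qm->sqe_size bytes and resubmits on -EBUSY; completions are delivered
 * through the qp's req_cb. Here backoff_and_resubmit() stands for a
 * hypothetical helper of the calling driver:
 *
 *	ret = hisi_qp_send(qp, msg);
 *	if (ret == -EBUSY)
 *		backoff_and_resubmit(qp, msg);
 */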
2215 | |
2216 | static void hisi_qm_cache_wb(struct hisi_qm *qm) |
2217 | { |
2218 | unsigned int val; |
2219 | |
2220 | if (qm->ver == QM_HW_V1) |
2221 | return; |
2222 | |
	writel(0x1, qm->io_base + QM_CACHE_WB_START);
2224 | if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, |
2225 | val, val & BIT(0), POLL_PERIOD, |
2226 | POLL_TIMEOUT)) |
2227 | dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n" ); |
2228 | } |
2229 | |
2230 | static void qm_qp_event_notifier(struct hisi_qp *qp) |
2231 | { |
2232 | wake_up_interruptible(&qp->uacce_q->wait); |
2233 | } |
2234 | |
2235 | /* This function returns free number of qp in qm. */ |
2236 | static int hisi_qm_get_available_instances(struct uacce_device *uacce) |
2237 | { |
2238 | struct hisi_qm *qm = uacce->priv; |
2239 | int ret; |
2240 | |
	down_read(&qm->qps_lock);
	ret = qm->qp_num - qm->qp_in_used;
	up_read(&qm->qps_lock);
2244 | |
2245 | return ret; |
2246 | } |
2247 | |
2248 | static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) |
2249 | { |
2250 | int i; |
2251 | |
2252 | for (i = 0; i < qm->qp_num; i++) |
		qm_set_qp_disable(&qm->qp_array[i], offset);
2254 | } |
2255 | |
2256 | static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, |
2257 | unsigned long arg, |
2258 | struct uacce_queue *q) |
2259 | { |
2260 | struct hisi_qm *qm = uacce->priv; |
2261 | struct hisi_qp *qp; |
2262 | u8 alg_type = 0; |
2263 | |
2264 | qp = hisi_qm_create_qp(qm, alg_type); |
	if (IS_ERR(qp))
		return PTR_ERR(qp);
2267 | |
2268 | q->priv = qp; |
2269 | q->uacce = uacce; |
2270 | qp->uacce_q = q; |
2271 | qp->event_cb = qm_qp_event_notifier; |
2272 | qp->pasid = arg; |
2273 | qp->is_in_kernel = false; |
2274 | |
2275 | return 0; |
2276 | } |
2277 | |
2278 | static void hisi_qm_uacce_put_queue(struct uacce_queue *q) |
2279 | { |
2280 | struct hisi_qp *qp = q->priv; |
2281 | |
2282 | hisi_qm_release_qp(qp); |
2283 | } |
2284 | |
2285 | /* map sq/cq/doorbell to user space */ |
2286 | static int hisi_qm_uacce_mmap(struct uacce_queue *q, |
2287 | struct vm_area_struct *vma, |
2288 | struct uacce_qfile_region *qfr) |
2289 | { |
2290 | struct hisi_qp *qp = q->priv; |
2291 | struct hisi_qm *qm = qp->qm; |
2292 | resource_size_t phys_base = qm->db_phys_base + |
2293 | qp->qp_id * qm->db_interval; |
2294 | size_t sz = vma->vm_end - vma->vm_start; |
2295 | struct pci_dev *pdev = qm->pdev; |
2296 | struct device *dev = &pdev->dev; |
2297 | unsigned long vm_pgoff; |
2298 | int ret; |
2299 | |
2300 | switch (qfr->type) { |
2301 | case UACCE_QFRT_MMIO: |
2302 | if (qm->ver == QM_HW_V1) { |
2303 | if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) |
2304 | return -EINVAL; |
2305 | } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { |
2306 | if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + |
2307 | QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) |
2308 | return -EINVAL; |
2309 | } else { |
2310 | if (sz > qm->db_interval) |
2311 | return -EINVAL; |
2312 | } |
2313 | |
2314 | vm_flags_set(vma, VM_IO); |
2315 | |
		return remap_pfn_range(vma, vma->vm_start,
				       phys_base >> PAGE_SHIFT,
				       sz, pgprot_noncached(vma->vm_page_prot));
2319 | case UACCE_QFRT_DUS: |
2320 | if (sz != qp->qdma.size) |
2321 | return -EINVAL; |
2322 | |
2323 | /* |
2324 | * dma_mmap_coherent() requires vm_pgoff as 0 |
2325 | * restore vm_pfoff to initial value for mmap() |
2326 | */ |
2327 | vm_pgoff = vma->vm_pgoff; |
2328 | vma->vm_pgoff = 0; |
2329 | ret = dma_mmap_coherent(dev, vma, qp->qdma.va, |
2330 | qp->qdma.dma, sz); |
2331 | vma->vm_pgoff = vm_pgoff; |
2332 | return ret; |
2333 | |
2334 | default: |
2335 | return -EINVAL; |
2336 | } |
2337 | } |
2338 | |
2339 | static int hisi_qm_uacce_start_queue(struct uacce_queue *q) |
2340 | { |
2341 | struct hisi_qp *qp = q->priv; |
2342 | |
2343 | return hisi_qm_start_qp(qp, qp->pasid); |
2344 | } |
2345 | |
2346 | static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) |
2347 | { |
2348 | struct hisi_qp *qp = q->priv; |
2349 | struct hisi_qm *qm = qp->qm; |
2350 | struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; |
2351 | u32 i = 0; |
2352 | |
2353 | hisi_qm_stop_qp(qp); |
2354 | |
2355 | if (!dev_dfx->dev_timeout || !dev_dfx->dev_state) |
2356 | return; |
2357 | |
2358 | /* |
2359 | * After the queue fails to be stopped, |
2360 | * wait for a period of time before releasing the queue. |
2361 | */ |
2362 | while (++i) { |
2363 | msleep(WAIT_PERIOD); |
2364 | |
		/* Since dev_timeout may be modified, check i >= dev_timeout */
		if (i >= dev_dfx->dev_timeout) {
			dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
2368 | qp->qp_id, dev_dfx->dev_state); |
2369 | dev_dfx->dev_state = QM_FINISH_WAIT; |
2370 | break; |
2371 | } |
2372 | } |
2373 | } |
2374 | |
2375 | static int hisi_qm_is_q_updated(struct uacce_queue *q) |
2376 | { |
2377 | struct hisi_qp *qp = q->priv; |
2378 | struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; |
2379 | int updated = 0; |
2380 | |
2381 | while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { |
2382 | /* make sure to read data from memory */ |
2383 | dma_rmb(); |
2384 | qm_cq_head_update(qp); |
2385 | cqe = qp->cqe + qp->qp_status.cq_head; |
2386 | updated = 1; |
2387 | } |
2388 | |
2389 | return updated; |
2390 | } |
2391 | |
2392 | static void qm_set_sqctype(struct uacce_queue *q, u16 type) |
2393 | { |
2394 | struct hisi_qm *qm = q->uacce->priv; |
2395 | struct hisi_qp *qp = q->priv; |
2396 | |
	down_write(&qm->qps_lock);
	qp->alg_type = type;
	up_write(&qm->qps_lock);
2400 | } |
2401 | |
2402 | static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, |
2403 | unsigned long arg) |
2404 | { |
2405 | struct hisi_qp *qp = q->priv; |
2406 | struct hisi_qp_info qp_info; |
2407 | struct hisi_qp_ctx qp_ctx; |
2408 | |
2409 | if (cmd == UACCE_CMD_QM_SET_QP_CTX) { |
		if (copy_from_user(&qp_ctx, (void __user *)arg,
				   sizeof(struct hisi_qp_ctx)))
2412 | return -EFAULT; |
2413 | |
2414 | if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) |
2415 | return -EINVAL; |
2416 | |
		qm_set_sqctype(q, qp_ctx.qc_type);
		qp_ctx.id = qp->qp_id;

		if (copy_to_user((void __user *)arg, &qp_ctx,
				 sizeof(struct hisi_qp_ctx)))
2422 | return -EFAULT; |
2423 | |
2424 | return 0; |
2425 | } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { |
		if (copy_from_user(&qp_info, (void __user *)arg,
				   sizeof(struct hisi_qp_info)))
			return -EFAULT;

		qp_info.sqe_size = qp->qm->sqe_size;
		qp_info.sq_depth = qp->sq_depth;
		qp_info.cq_depth = qp->cq_depth;

		if (copy_to_user((void __user *)arg, &qp_info,
				 sizeof(struct hisi_qp_info)))
2436 | return -EFAULT; |
2437 | |
2438 | return 0; |
2439 | } |
2440 | |
2441 | return -EINVAL; |
2442 | } |
2443 | |
2444 | /** |
2445 | * qm_hw_err_isolate() - Try to set the isolation status of the uacce device |
2446 | * according to user's configuration of error threshold. |
2447 | * @qm: the uacce device |
2448 | */ |
2449 | static int qm_hw_err_isolate(struct hisi_qm *qm) |
2450 | { |
2451 | struct qm_hw_err *err, *tmp, *hw_err; |
2452 | struct qm_err_isolate *isolate; |
2453 | u32 count = 0; |
2454 | |
2455 | isolate = &qm->isolate_data; |
2456 | |
2457 | #define SECONDS_PER_HOUR 3600 |
2458 | |
2459 | /* All the hw errs are processed by PF driver */ |
2460 | if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) |
2461 | return 0; |
2462 | |
	hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
2464 | if (!hw_err) |
2465 | return -ENOMEM; |
2466 | |
2467 | /* |
2468 | * Time-stamp every slot AER error. Then check the AER error log when the |
2469 | * next device AER error occurred. if the device slot AER error count exceeds |
2470 | * the setting error threshold in one hour, the isolated state will be set |
2471 | * to true. And the AER error logs that exceed one hour will be cleared. |
2472 | */ |
2473 | mutex_lock(&isolate->isolate_lock); |
2474 | hw_err->timestamp = jiffies; |
2475 | list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { |
2476 | if ((hw_err->timestamp - err->timestamp) / HZ > |
2477 | SECONDS_PER_HOUR) { |
			list_del(&err->list);
			kfree(err);
2480 | } else { |
2481 | count++; |
2482 | } |
2483 | } |
	list_add(&hw_err->list, &isolate->qm_hw_errs);
	mutex_unlock(&isolate->isolate_lock);
2486 | |
2487 | if (count >= isolate->err_threshold) |
2488 | isolate->is_isolate = true; |
2489 | |
2490 | return 0; |
2491 | } |
2492 | |
2493 | static void qm_hw_err_destroy(struct hisi_qm *qm) |
2494 | { |
2495 | struct qm_hw_err *err, *tmp; |
2496 | |
2497 | mutex_lock(&qm->isolate_data.isolate_lock); |
2498 | list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { |
		list_del(&err->list);
		kfree(err);
	}
	mutex_unlock(&qm->isolate_data.isolate_lock);
2503 | } |
2504 | |
2505 | static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) |
2506 | { |
2507 | struct hisi_qm *qm = uacce->priv; |
2508 | struct hisi_qm *pf_qm; |
2509 | |
2510 | if (uacce->is_vf) |
		pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2512 | else |
2513 | pf_qm = qm; |
2514 | |
2515 | return pf_qm->isolate_data.is_isolate ? |
2516 | UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; |
2517 | } |
2518 | |
2519 | static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) |
2520 | { |
2521 | struct hisi_qm *qm = uacce->priv; |
2522 | |
2523 | /* Must be set by PF */ |
2524 | if (uacce->is_vf) |
2525 | return -EPERM; |
2526 | |
2527 | if (qm->isolate_data.is_isolate) |
2528 | return -EPERM; |
2529 | |
2530 | qm->isolate_data.err_threshold = num; |
2531 | |
2532 | /* After the policy is updated, need to reset the hardware err list */ |
2533 | qm_hw_err_destroy(qm); |
2534 | |
2535 | return 0; |
2536 | } |
2537 | |
2538 | static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) |
2539 | { |
2540 | struct hisi_qm *qm = uacce->priv; |
2541 | struct hisi_qm *pf_qm; |
2542 | |
2543 | if (uacce->is_vf) { |
		pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2545 | return pf_qm->isolate_data.err_threshold; |
2546 | } |
2547 | |
2548 | return qm->isolate_data.err_threshold; |
2549 | } |
2550 | |
2551 | static const struct uacce_ops uacce_qm_ops = { |
2552 | .get_available_instances = hisi_qm_get_available_instances, |
2553 | .get_queue = hisi_qm_uacce_get_queue, |
2554 | .put_queue = hisi_qm_uacce_put_queue, |
2555 | .start_queue = hisi_qm_uacce_start_queue, |
2556 | .stop_queue = hisi_qm_uacce_stop_queue, |
2557 | .mmap = hisi_qm_uacce_mmap, |
2558 | .ioctl = hisi_qm_uacce_ioctl, |
2559 | .is_q_updated = hisi_qm_is_q_updated, |
2560 | .get_isolate_state = hisi_qm_get_isolate_state, |
2561 | .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, |
2562 | .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, |
2563 | }; |
2564 | |
2565 | static void qm_remove_uacce(struct hisi_qm *qm) |
2566 | { |
2567 | struct uacce_device *uacce = qm->uacce; |
2568 | |
2569 | if (qm->use_sva) { |
2570 | qm_hw_err_destroy(qm); |
2571 | uacce_remove(uacce); |
2572 | qm->uacce = NULL; |
2573 | } |
2574 | } |
2575 | |
2576 | static int qm_alloc_uacce(struct hisi_qm *qm) |
2577 | { |
2578 | struct pci_dev *pdev = qm->pdev; |
2579 | struct uacce_device *uacce; |
2580 | unsigned long mmio_page_nr; |
2581 | unsigned long dus_page_nr; |
2582 | u16 sq_depth, cq_depth; |
2583 | struct uacce_interface interface = { |
2584 | .flags = UACCE_DEV_SVA, |
2585 | .ops = &uacce_qm_ops, |
2586 | }; |
2587 | int ret; |
2588 | |
2589 | ret = strscpy(interface.name, dev_driver_string(&pdev->dev), |
2590 | sizeof(interface.name)); |
2591 | if (ret < 0) |
2592 | return -ENAMETOOLONG; |
2593 | |
	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);
2597 | |
2598 | if (uacce->flags & UACCE_DEV_SVA) { |
2599 | qm->use_sva = true; |
2600 | } else { |
2601 | /* only consider sva case */ |
2602 | qm_remove_uacce(qm); |
2603 | return -EINVAL; |
2604 | } |
2605 | |
2606 | uacce->is_vf = pdev->is_virtfn; |
2607 | uacce->priv = qm; |
2608 | |
2609 | if (qm->ver == QM_HW_V1) |
2610 | uacce->api_ver = HISI_QM_API_VER_BASE; |
2611 | else if (qm->ver == QM_HW_V2) |
2612 | uacce->api_ver = HISI_QM_API_VER2_BASE; |
2613 | else |
2614 | uacce->api_ver = HISI_QM_API_VER3_BASE; |
2615 | |
2616 | if (qm->ver == QM_HW_V1) |
2617 | mmio_page_nr = QM_DOORBELL_PAGE_NR; |
2618 | else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) |
2619 | mmio_page_nr = QM_DOORBELL_PAGE_NR + |
2620 | QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; |
2621 | else |
2622 | mmio_page_nr = qm->db_interval / PAGE_SIZE; |
2623 | |
	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2625 | |
2626 | /* Add one more page for device or qp status */ |
2627 | dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + |
2628 | sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> |
2629 | PAGE_SHIFT; |
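
	/*
	 * Worked example (assuming 4K pages, a 128-byte sqe, a 16-byte cqe
	 * and depths of 1024): the queue memory is 128 * 1024 + 16 * 1024 =
	 * 147456 bytes = 36 pages, and the extra status page gives
	 * dus_page_nr = 37.
	 */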
2630 | |
2631 | uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; |
2632 | uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; |
2633 | |
2634 | qm->uacce = uacce; |
	INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2636 | mutex_init(&qm->isolate_data.isolate_lock); |
2637 | |
2638 | return 0; |
2639 | } |
2640 | |
2641 | /** |
 * qm_frozen() - Try to freeze QM to cut off continuous queue requests. If
 * there is a user on the QM, return failure without doing anything.
 * @qm: The qm to be frozen.
 *
 * This function freezes QM, after which SRIOV can be disabled.
2647 | */ |
2648 | static int qm_frozen(struct hisi_qm *qm) |
2649 | { |
2650 | if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) |
2651 | return 0; |
2652 | |
	down_write(&qm->qps_lock);
2654 | |
2655 | if (!qm->qp_in_used) { |
2656 | qm->qp_in_used = qm->qp_num; |
		up_write(&qm->qps_lock);
		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2659 | return 0; |
2660 | } |
2661 | |
	up_write(&qm->qps_lock);
2663 | |
2664 | return -EBUSY; |
2665 | } |
2666 | |
2667 | static int qm_try_frozen_vfs(struct pci_dev *pdev, |
2668 | struct hisi_qm_list *qm_list) |
2669 | { |
2670 | struct hisi_qm *qm, *vf_qm; |
2671 | struct pci_dev *dev; |
2672 | int ret = 0; |
2673 | |
2674 | if (!qm_list || !pdev) |
2675 | return -EINVAL; |
2676 | |
	/* Try to freeze all the VFs before disabling SRIOV */
2678 | mutex_lock(&qm_list->lock); |
2679 | list_for_each_entry(qm, &qm_list->list, list) { |
2680 | dev = qm->pdev; |
2681 | if (dev == pdev) |
2682 | continue; |
2683 | if (pci_physfn(dev) == pdev) { |
			vf_qm = pci_get_drvdata(dev);
			ret = qm_frozen(vf_qm);
2686 | if (ret) |
2687 | goto frozen_fail; |
2688 | } |
2689 | } |
2690 | |
2691 | frozen_fail: |
	mutex_unlock(&qm_list->lock);
2693 | |
2694 | return ret; |
2695 | } |
2696 | |
2697 | /** |
2698 | * hisi_qm_wait_task_finish() - Wait until the task is finished |
2699 | * when removing the driver. |
2700 | * @qm: The qm needed to wait for the task to finish. |
2701 | * @qm_list: The list of all available devices. |
2702 | */ |
2703 | void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) |
2704 | { |
2705 | while (qm_frozen(qm) || |
2706 | ((qm->fun_type == QM_HW_PF) && |
		qm_try_frozen_vfs(qm->pdev, qm_list))) {
2708 | msleep(WAIT_PERIOD); |
2709 | } |
2710 | |
2711 | while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || |
2712 | test_bit(QM_RESETTING, &qm->misc_ctl)) |
2713 | msleep(WAIT_PERIOD); |
2714 | |
2715 | if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) |
		flush_work(&qm->cmd_process);
2717 | |
2718 | udelay(REMOVE_WAIT_DELAY); |
2719 | } |
2720 | EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); |
2721 | |
2722 | static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) |
2723 | { |
2724 | struct device *dev = &qm->pdev->dev; |
2725 | struct qm_dma *qdma; |
2726 | int i; |
2727 | |
2728 | for (i = num - 1; i >= 0; i--) { |
2729 | qdma = &qm->qp_array[i].qdma; |
		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
		kfree(qm->poll_data[i].qp_finish_id);
2732 | } |
2733 | |
	kfree(qm->poll_data);
	kfree(qm->qp_array);
2736 | } |
2737 | |
2738 | static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, |
2739 | u16 sq_depth, u16 cq_depth) |
2740 | { |
2741 | struct device *dev = &qm->pdev->dev; |
2742 | size_t off = qm->sqe_size * sq_depth; |
2743 | struct hisi_qp *qp; |
2744 | int ret = -ENOMEM; |
2745 | |
	qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
2747 | GFP_KERNEL); |
2748 | if (!qm->poll_data[id].qp_finish_id) |
2749 | return -ENOMEM; |
2750 | |
2751 | qp = &qm->qp_array[id]; |
	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2753 | GFP_KERNEL); |
2754 | if (!qp->qdma.va) |
2755 | goto err_free_qp_finish_id; |
2756 | |
2757 | qp->sqe = qp->qdma.va; |
2758 | qp->sqe_dma = qp->qdma.dma; |
2759 | qp->cqe = qp->qdma.va + off; |
2760 | qp->cqe_dma = qp->qdma.dma + off; |
2761 | qp->qdma.size = dma_size; |
2762 | qp->sq_depth = sq_depth; |
2763 | qp->cq_depth = cq_depth; |
2764 | qp->qm = qm; |
2765 | qp->qp_id = id; |
2766 | |
2767 | return 0; |
2768 | |
2769 | err_free_qp_finish_id: |
	kfree(qm->poll_data[id].qp_finish_id);
2771 | return ret; |
2772 | } |
2773 | |
2774 | static void hisi_qm_pre_init(struct hisi_qm *qm) |
2775 | { |
2776 | struct pci_dev *pdev = qm->pdev; |
2777 | |
2778 | if (qm->ver == QM_HW_V1) |
2779 | qm->ops = &qm_hw_ops_v1; |
2780 | else if (qm->ver == QM_HW_V2) |
2781 | qm->ops = &qm_hw_ops_v2; |
2782 | else |
2783 | qm->ops = &qm_hw_ops_v3; |
2784 | |
	pci_set_drvdata(pdev, qm);
2786 | mutex_init(&qm->mailbox_lock); |
2787 | init_rwsem(&qm->qps_lock); |
2788 | qm->qp_in_used = 0; |
2789 | if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { |
2790 | if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) |
			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
2792 | } |
2793 | } |
2794 | |
2795 | static void qm_cmd_uninit(struct hisi_qm *qm) |
2796 | { |
2797 | u32 val; |
2798 | |
2799 | if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) |
2800 | return; |
2801 | |
	val = readl(qm->io_base + QM_IFC_INT_MASK);
	val |= QM_IFC_INT_DISABLE;
	writel(val, qm->io_base + QM_IFC_INT_MASK);
2805 | } |
2806 | |
2807 | static void qm_cmd_init(struct hisi_qm *qm) |
2808 | { |
2809 | u32 val; |
2810 | |
2811 | if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) |
2812 | return; |
2813 | |
2814 | /* Clear communication interrupt source */ |
2815 | qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); |
2816 | |
2817 | /* Enable pf to vf communication reg. */ |
	val = readl(qm->io_base + QM_IFC_INT_MASK);
	val &= ~QM_IFC_INT_DISABLE;
	writel(val, qm->io_base + QM_IFC_INT_MASK);
2821 | } |
2822 | |
2823 | static void qm_put_pci_res(struct hisi_qm *qm) |
2824 | { |
2825 | struct pci_dev *pdev = qm->pdev; |
2826 | |
2827 | if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) |
		iounmap(qm->db_io_base);

	iounmap(qm->io_base);
2831 | pci_release_mem_regions(pdev); |
2832 | } |
2833 | |
2834 | static void hisi_qm_pci_uninit(struct hisi_qm *qm) |
2835 | { |
2836 | struct pci_dev *pdev = qm->pdev; |
2837 | |
	pci_free_irq_vectors(pdev);
	qm_put_pci_res(qm);
	pci_disable_device(pdev);
2841 | } |
2842 | |
2843 | static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) |
2844 | { |
2845 | if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) |
		writel(state, qm->io_base + QM_VF_STATE);
2847 | } |
2848 | |
2849 | static void hisi_qm_unint_work(struct hisi_qm *qm) |
2850 | { |
	destroy_workqueue(qm->wq);
2852 | } |
2853 | |
2854 | static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) |
2855 | { |
2856 | struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; |
2857 | struct device *dev = &qm->pdev->dev; |
2858 | |
	dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma);
2860 | } |
2861 | |
2862 | static void hisi_qm_memory_uninit(struct hisi_qm *qm) |
2863 | { |
2864 | struct device *dev = &qm->pdev->dev; |
2865 | |
	hisi_qp_memory_uninit(qm, qm->qp_num);
2867 | hisi_qm_free_rsv_buf(qm); |
2868 | if (qm->qdma.va) { |
2869 | hisi_qm_cache_wb(qm); |
		dma_free_coherent(dev, qm->qdma.size,
				  qm->qdma.va, qm->qdma.dma);
2872 | } |
2873 | |
2874 | idr_destroy(&qm->qp_idr); |
2875 | |
2876 | if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) |
		kfree(qm->factor);
2878 | } |
2879 | |
2880 | /** |
2881 | * hisi_qm_uninit() - Uninitialize qm. |
2882 | * @qm: The qm needed uninit. |
2883 | * |
2884 | * This function uninits qm related device resources. |
2885 | */ |
2886 | void hisi_qm_uninit(struct hisi_qm *qm) |
2887 | { |
2888 | qm_cmd_uninit(qm); |
2889 | hisi_qm_unint_work(qm); |
2890 | |
	down_write(&qm->qps_lock);
	hisi_qm_memory_uninit(qm);
	hisi_qm_set_state(qm, QM_NOT_READY);
	up_write(&qm->qps_lock);
2895 | |
2896 | qm_irqs_unregister(qm); |
2897 | hisi_qm_pci_uninit(qm); |
2898 | if (qm->use_sva) { |
		uacce_remove(qm->uacce);
2900 | qm->uacce = NULL; |
2901 | } |
2902 | } |
2903 | EXPORT_SYMBOL_GPL(hisi_qm_uninit); |
2904 | |
2905 | /** |
2906 | * hisi_qm_get_vft() - Get vft from a qm. |
2907 | * @qm: The qm we want to get its vft. |
2908 | * @base: The base number of queue in vft. |
2909 | * @number: The number of queues in vft. |
2910 | * |
2911 | * We can allocate multiple queues to a qm by configuring virtual function |
 * table. This function retrieves the related configuration. Normally, we call
 * this
2913 | * function in VF driver to get the queue information. |
2914 | * |
2915 | * qm hw v1 does not support this interface. |
2916 | */ |
2917 | static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) |
2918 | { |
2919 | if (!base || !number) |
2920 | return -EINVAL; |
2921 | |
2922 | if (!qm->ops->get_vft) { |
2923 | dev_err(&qm->pdev->dev, "Don't support vft read!\n" ); |
2924 | return -EINVAL; |
2925 | } |
2926 | |
2927 | return qm->ops->get_vft(qm, base, number); |
2928 | } |
2929 | |
2930 | /** |
2931 | * hisi_qm_set_vft() - Set vft to a qm. |
2932 | * @qm: The qm we want to set its vft. |
2933 | * @fun_num: The function number. |
2934 | * @base: The base number of queue in vft. |
2935 | * @number: The number of queues in vft. |
2936 | * |
 * This function is always called in the PF driver. It is used to assign queues
2938 | * among PF and VFs. |
2939 | * |
2940 | * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) |
2941 | * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) |
2942 | * (VF function number 0x2) |
2943 | */ |
2944 | static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, |
2945 | u32 number) |
2946 | { |
2947 | u32 max_q_num = qm->ctrl_qp_num; |
2948 | |
2949 | if (base >= max_q_num || number > max_q_num || |
2950 | (base + number) > max_q_num) |
2951 | return -EINVAL; |
2952 | |
2953 | return qm_set_sqc_cqc_vft(qm, fun_num, base, number); |
2954 | } |
2955 | |
2956 | static void qm_init_eq_aeq_status(struct hisi_qm *qm) |
2957 | { |
2958 | struct hisi_qm_status *status = &qm->status; |
2959 | |
2960 | status->eq_head = 0; |
2961 | status->aeq_head = 0; |
2962 | status->eqc_phase = true; |
2963 | status->aeqc_phase = true; |
2964 | } |
2965 | |
2966 | static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) |
2967 | { |
2968 | /* Clear eq/aeq interrupt source */ |
	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2974 | } |
2975 | |
2976 | static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) |
2977 | { |
	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2980 | } |
2981 | |
2982 | static int qm_eq_ctx_cfg(struct hisi_qm *qm) |
2983 | { |
2984 | struct qm_eqc eqc = {0}; |
2985 | |
2986 | eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); |
2987 | eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); |
2988 | if (qm->ver == QM_HW_V1) |
2989 | eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); |
2990 | eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); |
2991 | |
	return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
2993 | } |
2994 | |
2995 | static int qm_aeq_ctx_cfg(struct hisi_qm *qm) |
2996 | { |
2997 | struct qm_aeqc aeqc = {0}; |
2998 | |
2999 | aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); |
3000 | aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); |
3001 | aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); |
3002 | |
	return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
3004 | } |
3005 | |
3006 | static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) |
3007 | { |
3008 | struct device *dev = &qm->pdev->dev; |
3009 | int ret; |
3010 | |
3011 | qm_init_eq_aeq_status(qm); |
3012 | |
3013 | ret = qm_eq_ctx_cfg(qm); |
3014 | if (ret) { |
3015 | dev_err(dev, "Set eqc failed!\n" ); |
3016 | return ret; |
3017 | } |
3018 | |
3019 | return qm_aeq_ctx_cfg(qm); |
3020 | } |
3021 | |
3022 | static int __hisi_qm_start(struct hisi_qm *qm) |
3023 | { |
3024 | int ret; |
3025 | |
3026 | WARN_ON(!qm->qdma.va); |
3027 | |
3028 | if (qm->fun_type == QM_HW_PF) { |
		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3030 | if (ret) |
3031 | return ret; |
3032 | } |
3033 | |
3034 | ret = qm_eq_aeq_ctx_cfg(qm); |
3035 | if (ret) |
3036 | return ret; |
3037 | |
3038 | ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); |
3039 | if (ret) |
3040 | return ret; |
3041 | |
3042 | ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); |
3043 | if (ret) |
3044 | return ret; |
3045 | |
3046 | qm_init_prefetch(qm); |
3047 | qm_enable_eq_aeq_interrupts(qm); |
3048 | |
3049 | return 0; |
3050 | } |
3051 | |
3052 | /** |
3053 | * hisi_qm_start() - start qm |
3054 | * @qm: The qm to be started. |
3055 | * |
3056 | * This function starts a qm, then we can allocate qp from this qm. |
3057 | */ |
3058 | int hisi_qm_start(struct hisi_qm *qm) |
3059 | { |
3060 | struct device *dev = &qm->pdev->dev; |
3061 | int ret = 0; |
3062 | |
	down_write(&qm->qps_lock);

	dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);

	if (!qm->qp_num) {
		dev_err(dev, "qp_num should not be 0\n");
3069 | ret = -EINVAL; |
3070 | goto err_unlock; |
3071 | } |
3072 | |
3073 | ret = __hisi_qm_start(qm); |
3074 | if (ret) |
3075 | goto err_unlock; |
3076 | |
	atomic_set(&qm->status.flags, QM_WORK);
	hisi_qm_set_state(qm, QM_READY);
3079 | |
3080 | err_unlock: |
	up_write(&qm->qps_lock);
3082 | return ret; |
3083 | } |
3084 | EXPORT_SYMBOL_GPL(hisi_qm_start); |
3085 | |
3086 | static int qm_restart(struct hisi_qm *qm) |
3087 | { |
3088 | struct device *dev = &qm->pdev->dev; |
3089 | struct hisi_qp *qp; |
3090 | int ret, i; |
3091 | |
3092 | ret = hisi_qm_start(qm); |
3093 | if (ret < 0) |
3094 | return ret; |
3095 | |
	down_write(&qm->qps_lock);
	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
		    qp->is_resetting == true) {
			ret = qm_start_qp_nolock(qp, 0);
			if (ret < 0) {
				dev_err(dev, "Failed to start qp%d!\n", i);

				up_write(&qm->qps_lock);
				return ret;
			}
			qp->is_resetting = false;
		}
	}
	up_write(&qm->qps_lock);
3112 | |
3113 | return 0; |
3114 | } |
3115 | |
3116 | /* Stop started qps in reset flow */ |
3117 | static void qm_stop_started_qp(struct hisi_qm *qm) |
3118 | { |
3119 | struct hisi_qp *qp; |
3120 | int i; |
3121 | |
3122 | for (i = 0; i < qm->qp_num; i++) { |
3123 | qp = &qm->qp_array[i]; |
		if (atomic_read(&qp->qp_status.flags) == QP_START) {
3125 | qp->is_resetting = true; |
3126 | qm_stop_qp_nolock(qp); |
3127 | } |
3128 | } |
3129 | } |
3130 | |
3131 | /** |
3132 | * qm_clear_queues() - Clear all queues memory in a qm. |
3133 | * @qm: The qm in which the queues will be cleared. |
3134 | * |
3135 | * This function clears all queues memory in a qm. Reset of accelerator can |
3136 | * use this to clear queues. |
3137 | */ |
3138 | static void qm_clear_queues(struct hisi_qm *qm) |
3139 | { |
3140 | struct hisi_qp *qp; |
3141 | int i; |
3142 | |
3143 | for (i = 0; i < qm->qp_num; i++) { |
3144 | qp = &qm->qp_array[i]; |
3145 | if (qp->is_in_kernel && qp->is_resetting) |
3146 | memset(qp->qdma.va, 0, qp->qdma.size); |
3147 | } |
3148 | |
3149 | memset(qm->qdma.va, 0, qm->qdma.size); |
3150 | } |
3151 | |
3152 | /** |
3153 | * hisi_qm_stop() - Stop a qm. |
3154 | * @qm: The qm which will be stopped. |
3155 | * @r: The reason to stop qm. |
3156 | * |
 * This function stops qm and its qps, after which qm can not accept requests.
 * Related resources are not released in this state; we can use hisi_qm_start
3159 | * to let qm start again. |
3160 | */ |
3161 | int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) |
3162 | { |
3163 | struct device *dev = &qm->pdev->dev; |
3164 | int ret = 0; |
3165 | |
	down_write(&qm->qps_lock);

	if (atomic_read(&qm->status.flags) == QM_STOP)
		goto err_unlock;

	/* Stop all request sending first. */
	atomic_set(&qm->status.flags, QM_STOP);
3173 | qm->status.stop_reason = r; |
3174 | |
3175 | if (qm->status.stop_reason != QM_NORMAL) { |
3176 | hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); |
3177 | /* |
3178 | * When performing soft reset, the hardware will no longer |
3179 | * do tasks, and the tasks in the device will be flushed |
3180 | * out directly since the master ooo is closed. |
3181 | */ |
3182 | if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && |
3183 | r != QM_SOFT_RESET) { |
3184 | ret = qm_drain_qm(qm); |
3185 | if (ret) { |
3186 | dev_err(dev, "failed to drain qm!\n" ); |
3187 | goto err_unlock; |
3188 | } |
3189 | } |
3190 | |
3191 | qm_stop_started_qp(qm); |
3192 | |
3193 | hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); |
3194 | } |
3195 | |
3196 | qm_disable_eq_aeq_interrupts(qm); |
3197 | if (qm->fun_type == QM_HW_PF) { |
		ret = hisi_qm_set_vft(qm, 0, 0, 0);
		if (ret < 0) {
			dev_err(dev, "Failed to set vft!\n");
3201 | ret = -EBUSY; |
3202 | goto err_unlock; |
3203 | } |
3204 | } |
3205 | |
3206 | qm_clear_queues(qm); |
3207 | qm->status.stop_reason = QM_NORMAL; |
3208 | |
3209 | err_unlock: |
	up_write(&qm->qps_lock);
3211 | return ret; |
3212 | } |
3213 | EXPORT_SYMBOL_GPL(hisi_qm_stop); |
3214 | |
3215 | static void qm_hw_error_init(struct hisi_qm *qm) |
3216 | { |
3217 | if (!qm->ops->hw_error_init) { |
3218 | dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n" ); |
3219 | return; |
3220 | } |
3221 | |
3222 | qm->ops->hw_error_init(qm); |
3223 | } |
3224 | |
3225 | static void qm_hw_error_uninit(struct hisi_qm *qm) |
3226 | { |
3227 | if (!qm->ops->hw_error_uninit) { |
3228 | dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n" ); |
3229 | return; |
3230 | } |
3231 | |
3232 | qm->ops->hw_error_uninit(qm); |
3233 | } |
3234 | |
3235 | static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) |
3236 | { |
3237 | if (!qm->ops->hw_error_handle) { |
3238 | dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n" ); |
3239 | return ACC_ERR_NONE; |
3240 | } |
3241 | |
3242 | return qm->ops->hw_error_handle(qm); |
3243 | } |
3244 | |
3245 | /** |
3246 | * hisi_qm_dev_err_init() - Initialize device error configuration. |
3247 | * @qm: The qm for which we want to do error initialization. |
3248 | * |
3249 | * Initialize QM and device error related configuration. |
3250 | */ |
3251 | void hisi_qm_dev_err_init(struct hisi_qm *qm) |
3252 | { |
3253 | if (qm->fun_type == QM_HW_VF) |
3254 | return; |
3255 | |
3256 | qm_hw_error_init(qm); |
3257 | |
3258 | if (!qm->err_ini->hw_err_enable) { |
3259 | dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n" ); |
3260 | return; |
3261 | } |
3262 | qm->err_ini->hw_err_enable(qm); |
3263 | } |
3264 | EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); |
3265 | |
3266 | /** |
3267 | * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. |
3268 | * @qm: The qm for which we want to do error uninitialization. |
3269 | * |
3270 | * Uninitialize QM and device error related configuration. |
3271 | */ |
3272 | void hisi_qm_dev_err_uninit(struct hisi_qm *qm) |
3273 | { |
3274 | if (qm->fun_type == QM_HW_VF) |
3275 | return; |
3276 | |
3277 | qm_hw_error_uninit(qm); |
3278 | |
3279 | if (!qm->err_ini->hw_err_disable) { |
3280 | dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n" ); |
3281 | return; |
3282 | } |
3283 | qm->err_ini->hw_err_disable(qm); |
3284 | } |
3285 | EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); |
3286 | |
3287 | /** |
3288 | * hisi_qm_free_qps() - free multiple queue pairs. |
3289 | * @qps: The queue pairs need to be freed. |
3290 | * @qp_num: The num of queue pairs. |
3291 | */ |
3292 | void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) |
3293 | { |
3294 | int i; |
3295 | |
3296 | if (!qps || qp_num <= 0) |
3297 | return; |
3298 | |
3299 | for (i = qp_num - 1; i >= 0; i--) |
		hisi_qm_release_qp(qps[i]);
3301 | } |
3302 | EXPORT_SYMBOL_GPL(hisi_qm_free_qps); |
3303 | |
3304 | static void free_list(struct list_head *head) |
3305 | { |
3306 | struct hisi_qm_resource *res, *tmp; |
3307 | |
3308 | list_for_each_entry_safe(res, tmp, head, list) { |
		list_del(&res->list);
		kfree(res);
3311 | } |
3312 | } |
3313 | |
3314 | static int hisi_qm_sort_devices(int node, struct list_head *head, |
3315 | struct hisi_qm_list *qm_list) |
3316 | { |
3317 | struct hisi_qm_resource *res, *tmp; |
3318 | struct hisi_qm *qm; |
3319 | struct list_head *n; |
3320 | struct device *dev; |
3321 | int dev_node; |
3322 | |
3323 | list_for_each_entry(qm, &qm_list->list, list) { |
3324 | dev = &qm->pdev->dev; |
3325 | |
3326 | dev_node = dev_to_node(dev); |
3327 | if (dev_node < 0) |
3328 | dev_node = 0; |
3329 | |
		res = kzalloc(sizeof(*res), GFP_KERNEL);
3331 | if (!res) |
3332 | return -ENOMEM; |
3333 | |
3334 | res->qm = qm; |
3335 | res->distance = node_distance(dev_node, node); |
3336 | n = head; |
3337 | list_for_each_entry(tmp, head, list) { |
3338 | if (res->distance < tmp->distance) { |
3339 | n = &tmp->list; |
3340 | break; |
3341 | } |
3342 | } |
		list_add_tail(&res->list, n);
3344 | } |
3345 | |
3346 | return 0; |
3347 | } |
3348 | |
3349 | /** |
3350 | * hisi_qm_alloc_qps_node() - Create multiple queue pairs. |
3351 | * @qm_list: The list of all available devices. |
3352 | * @qp_num: The number of queue pairs need created. |
3353 | * @alg_type: The algorithm type. |
3354 | * @node: The numa node. |
3355 | * @qps: The queue pairs need created. |
3356 | * |
 * This function will sort all available devices according to NUMA distance.
 * Then it tries to create all queue pairs from one device; if no device
 * meets the requirements, an error is returned.
3360 | */ |
3361 | int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, |
3362 | u8 alg_type, int node, struct hisi_qp **qps) |
3363 | { |
3364 | struct hisi_qm_resource *tmp; |
3365 | int ret = -ENODEV; |
3366 | LIST_HEAD(head); |
3367 | int i; |
3368 | |
3369 | if (!qps || !qm_list || qp_num <= 0) |
3370 | return -EINVAL; |
3371 | |
3372 | mutex_lock(&qm_list->lock); |
	if (hisi_qm_sort_devices(node, &head, qm_list)) {
		mutex_unlock(&qm_list->lock);
3375 | goto err; |
3376 | } |
3377 | |
3378 | list_for_each_entry(tmp, &head, list) { |
3379 | for (i = 0; i < qp_num; i++) { |
			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
			if (IS_ERR(qps[i])) {
3382 | hisi_qm_free_qps(qps, i); |
3383 | break; |
3384 | } |
3385 | } |
3386 | |
3387 | if (i == qp_num) { |
3388 | ret = 0; |
3389 | break; |
3390 | } |
3391 | } |
3392 | |
	mutex_unlock(&qm_list->lock);
	if (ret)
		pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
3396 | node, alg_type, qp_num); |
3397 | |
3398 | err: |
	free_list(&head);
3400 | return ret; |
3401 | } |
3402 | EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); |
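
/*
 * A minimal usage sketch (illustrative; "qm_list" stands for the device list
 * an accelerator driver maintains): allocate two qps of alg type 0 close to
 * NUMA node 0, then release them with hisi_qm_free_qps():
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&qm_list, 2, 0, 0, qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);
 */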
3403 | |
3404 | static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) |
3405 | { |
3406 | u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; |
3407 | u32 max_qp_num = qm->max_qp_num; |
3408 | u32 q_base = qm->qp_num; |
3409 | int ret; |
3410 | |
3411 | if (!num_vfs) |
3412 | return -EINVAL; |
3413 | |
3414 | vfs_q_num = qm->ctrl_qp_num - qm->qp_num; |
3415 | |
3416 | /* If vfs_q_num is less than num_vfs, return error. */ |
3417 | if (vfs_q_num < num_vfs) |
3418 | return -EINVAL; |
3419 | |
3420 | q_num = vfs_q_num / num_vfs; |
3421 | remain_q_num = vfs_q_num % num_vfs; |
3422 | |
3423 | for (i = num_vfs; i > 0; i--) { |
3424 | /* |
3425 | * if q_num + remain_q_num > max_qp_num in last vf, divide the |
3426 | * remaining queues equally. |
3427 | */ |
3428 | if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { |
3429 | act_q_num = q_num + remain_q_num; |
3430 | remain_q_num = 0; |
3431 | } else if (remain_q_num > 0) { |
3432 | act_q_num = q_num + 1; |
3433 | remain_q_num--; |
3434 | } else { |
3435 | act_q_num = q_num; |
3436 | } |
3437 | |
3438 | act_q_num = min(act_q_num, max_qp_num); |
		ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3440 | if (ret) { |
3441 | for (j = num_vfs; j > i; j--) |
				hisi_qm_set_vft(qm, j, 0, 0);
3443 | return ret; |
3444 | } |
3445 | q_base += act_q_num; |
3446 | } |
3447 | |
3448 | return 0; |
3449 | } |
3450 | |
3451 | static int qm_clear_vft_config(struct hisi_qm *qm) |
3452 | { |
3453 | int ret; |
3454 | u32 i; |
3455 | |
3456 | for (i = 1; i <= qm->vfs_num; i++) { |
		ret = hisi_qm_set_vft(qm, i, 0, 0);
3458 | if (ret) |
3459 | return ret; |
3460 | } |
3461 | qm->vfs_num = 0; |
3462 | |
3463 | return 0; |
3464 | } |
3465 | |
3466 | static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) |
3467 | { |
3468 | struct device *dev = &qm->pdev->dev; |
3469 | u32 ir = qos * QM_QOS_RATE; |
3470 | int ret, total_vfs, i; |
3471 | |
	total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3473 | if (fun_index > total_vfs) |
3474 | return -EINVAL; |
3475 | |
3476 | qm->factor[fun_index].func_qos = qos; |
3477 | |
	ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3479 | if (ret) { |
3480 | dev_err(dev, "failed to calculate shaper parameter!\n" ); |
3481 | return -EINVAL; |
3482 | } |
3483 | |
3484 | for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { |
		/* The same queue base number is reused for different alg types */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
		if (ret) {
			dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
3489 | return -EINVAL; |
3490 | } |
3491 | } |
3492 | |
3493 | return 0; |
3494 | } |
3495 | |
3496 | static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) |
3497 | { |
3498 | u64 cir_u = 0, cir_b = 0, cir_s = 0; |
3499 | u64 shaper_vft, ir_calc, ir; |
3500 | unsigned int val; |
3501 | u32 error_rate; |
3502 | int ret; |
3503 | |
3504 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, |
3505 | val & BIT(0), POLL_PERIOD, |
3506 | POLL_TIMEOUT); |
3507 | if (ret) |
3508 | return 0; |
3509 | |
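	/*
	 * Read back the SHAPER table entry of this function: select the read
	 * op and table type, point at the function index, then trigger the
	 * operation and poll QM_VFT_CFG_RDY again before fetching the 64-bit
	 * entry from the data registers.
	 */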
3510 | 	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); |
3511 | 	writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); |
3512 | 	writel(fun_index, qm->io_base + QM_VFT_CFG); |
3513 | |
3514 | 	writel(0x0, qm->io_base + QM_VFT_CFG_RDY); |
3515 | 	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); |
3516 | |
3517 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, |
3518 | val & BIT(0), POLL_PERIOD, |
3519 | POLL_TIMEOUT); |
3520 | if (ret) |
3521 | return 0; |
3522 | |
3523 | 	shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | |
3524 | 		     ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); |
3525 | |
3526 | cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; |
3527 | cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; |
3528 | cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; |
3529 | |
3530 | cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; |
3531 | cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; |
3532 | |
3533 | ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); |
3534 | |
3535 | ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; |
3536 | |
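	/*
	 * Sanity check: compare the rate reconstructed from the hardware
	 * entry with the expected one; the relative error is scaled by
	 * QM_QOS_EXPAND_RATE before being checked against the threshold.
	 */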
3537 | error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; |
3538 | if (error_rate > QM_QOS_MIN_ERROR_RATE) { |
3539 | 		pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate); |
3540 | return 0; |
3541 | } |
3542 | |
3543 | return ir; |
3544 | } |
3545 | |
3546 | static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) |
3547 | { |
3548 | struct device *dev = &qm->pdev->dev; |
3549 | u64 mb_cmd; |
3550 | u32 qos; |
3551 | int ret; |
3552 | |
3553 | 	qos = qm_get_shaper_vft_qos(qm, fun_num); |
3554 | 	if (!qos) { |
3555 | 		dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); |
3556 | 		return; |
3557 | 	} |
3558 | |
3559 | 	mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; |
3560 | 	ret = qm_ping_single_vf(qm, mb_cmd, fun_num); |
3561 | 	if (ret) |
3562 | 		dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); |
3563 | } |
3564 | |
3565 | static int qm_vf_read_qos(struct hisi_qm *qm) |
3566 | { |
3567 | int cnt = 0; |
3568 | int ret = -EINVAL; |
3569 | |
3570 | /* reset mailbox qos val */ |
3571 | qm->mb_qos = 0; |
3572 | |
3573 | /* vf ping pf to get function qos */ |
3574 | 	ret = qm_ping_pf(qm, QM_VF_GET_QOS); |
3575 | 	if (ret) { |
3576 | 		pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); |
3577 | return ret; |
3578 | } |
3579 | |
3580 | while (true) { |
3581 | msleep(QM_WAIT_DST_ACK); |
3582 | if (qm->mb_qos) |
3583 | break; |
3584 | |
3585 | if (++cnt > QM_MAX_VF_WAIT_COUNT) { |
3586 | 			pci_err(qm->pdev, "PF ping VF timeout!\n"); |
3587 | return -ETIMEDOUT; |
3588 | } |
3589 | } |
3590 | |
3591 | return ret; |
3592 | } |
3593 | |
3594 | static ssize_t qm_algqos_read(struct file *filp, char __user *buf, |
3595 | size_t count, loff_t *pos) |
3596 | { |
3597 | struct hisi_qm *qm = filp->private_data; |
3598 | char tbuf[QM_DBG_READ_LEN]; |
3599 | u32 qos_val, ir; |
3600 | int ret; |
3601 | |
3602 | ret = hisi_qm_get_dfx_access(qm); |
3603 | if (ret) |
3604 | return ret; |
3605 | |
3606 | /* Mailbox and reset cannot be operated at the same time */ |
3607 | 	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { |
3608 | 		pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); |
3609 | 		ret = -EAGAIN; |
3610 | 		goto err_put_dfx_access; |
3611 | 	} |
3612 | |
3613 | 	if (qm->fun_type == QM_HW_PF) { |
3614 | 		ir = qm_get_shaper_vft_qos(qm, 0); |
3615 | 	} else { |
3616 | 		ret = qm_vf_read_qos(qm); |
3617 | 		if (ret) |
3618 | 			goto err_get_status; |
3619 | 		ir = qm->mb_qos; |
3620 | 	} |
3621 | |
3622 | 	qos_val = ir / QM_QOS_RATE; |
3623 | 	ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); |
3624 | |
3625 | 	ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); |
3626 | |
3627 | err_get_status: |
3628 | 	clear_bit(QM_RESETTING, &qm->misc_ctl); |
3629 | err_put_dfx_access: |
3630 | hisi_qm_put_dfx_access(qm); |
3631 | return ret; |
3632 | } |
3633 | |
3634 | static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, |
3635 | unsigned long *val, |
3636 | unsigned int *fun_index) |
3637 | { |
3638 | const struct bus_type *bus_type = qm->pdev->dev.bus; |
3639 | char tbuf_bdf[QM_DBG_READ_LEN] = {0}; |
3640 | char val_buf[QM_DBG_READ_LEN] = {0}; |
3641 | struct pci_dev *pdev; |
3642 | struct device *dev; |
3643 | int ret; |
3644 | |
3645 | 	ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); |
3646 | 	if (ret != QM_QOS_PARAM_NUM) |
3647 | 		return -EINVAL; |
3648 | |
3649 | 	ret = kstrtoul(val_buf, 10, val); |
3650 | 	if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { |
3651 | 		pci_err(qm->pdev, "invalid qos value, please set 1~1000!\n"); |
3652 | 		return -EINVAL; |
3653 | 	} |
3654 | |
3655 | 	dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); |
3656 | 	if (!dev) { |
3657 | 		pci_err(qm->pdev, "invalid pci bdf number!\n"); |
3658 | return -ENODEV; |
3659 | } |
3660 | |
3661 | pdev = container_of(dev, struct pci_dev, dev); |
3662 | |
3663 | *fun_index = pdev->devfn; |
3664 | |
3665 | return 0; |
3666 | } |
3667 | |
3668 | static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, |
3669 | size_t count, loff_t *pos) |
3670 | { |
3671 | struct hisi_qm *qm = filp->private_data; |
3672 | char tbuf[QM_DBG_READ_LEN]; |
3673 | unsigned int fun_index; |
3674 | unsigned long val; |
3675 | int len, ret; |
3676 | |
3677 | if (*pos != 0) |
3678 | return 0; |
3679 | |
3680 | if (count >= QM_DBG_READ_LEN) |
3681 | return -ENOSPC; |
3682 | |
3683 | 	len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); |
3684 | 	if (len < 0) |
3685 | 		return len; |
3686 | |
3687 | 	tbuf[len] = '\0'; |
3688 | 	ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); |
3689 | 	if (ret) |
3690 | 		return ret; |
3691 | |
3692 | 	/* Mailbox and reset cannot be operated at the same time */ |
3693 | 	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { |
3694 | 		pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); |
3695 | return -EAGAIN; |
3696 | } |
3697 | |
3698 | ret = qm_pm_get_sync(qm); |
3699 | if (ret) { |
3700 | ret = -EINVAL; |
3701 | goto err_get_status; |
3702 | } |
3703 | |
3704 | 	ret = qm_func_shaper_enable(qm, fun_index, val); |
3705 | 	if (ret) { |
3706 | 		pci_err(qm->pdev, "failed to enable function shaper!\n"); |
3707 | 		ret = -EINVAL; |
3708 | 		goto err_put_sync; |
3709 | 	} |
3710 | |
3711 | 	pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", |
3712 | 		 fun_index, val); |
3713 | 	ret = count; |
3714 | |
3715 | err_put_sync: |
3716 | 	qm_pm_put_sync(qm); |
3717 | err_get_status: |
3718 | 	clear_bit(QM_RESETTING, &qm->misc_ctl); |
3719 | return ret; |
3720 | } |
3721 | |
3722 | static const struct file_operations qm_algqos_fops = { |
3723 | .owner = THIS_MODULE, |
3724 | .open = simple_open, |
3725 | .read = qm_algqos_read, |
3726 | .write = qm_algqos_write, |
3727 | }; |
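/*
 * Illustrative usage of the alg_qos debugfs file served by the fops above
 * (the BDF and value are made-up examples; the qos value must be 1-1000):
 *   cat alg_qos                       # read the current qos of the function
 *   echo "0000:81:00.1 5" > alg_qos   # PF only: set qos of that function
 * The write path parses "<bdf> <qos>" exactly as qm_get_qos_value() does.
 */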
3728 | |
3729 | /** |
3730 | * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. |
3731 | * @qm: The qm for which we want to add debugfs files. |
3732 | * |
3733 |  * Create the function qos debugfs file; a VF pings the PF to get the function qos. |
3734 | */ |
3735 | void hisi_qm_set_algqos_init(struct hisi_qm *qm) |
3736 | { |
3737 | if (qm->fun_type == QM_HW_PF) |
3738 | 		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, |
3739 | 				    qm, &qm_algqos_fops); |
3740 | 	else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) |
3741 | 		debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, |
3742 | 				    qm, &qm_algqos_fops); |
3743 | } |
3744 | |
3745 | static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) |
3746 | { |
3747 | int i; |
3748 | |
3749 | for (i = 1; i <= total_func; i++) |
3750 | qm->factor[i].func_qos = QM_QOS_MAX_VAL; |
3751 | } |
3752 | |
3753 | /** |
3754 | * hisi_qm_sriov_enable() - enable virtual functions |
3755 | * @pdev: the PCIe device |
3756 | * @max_vfs: the number of virtual functions to enable |
3757 | * |
3758 |  * Returns the number of enabled VFs. If VFs are already enabled, or if |
3759 |  * max_vfs exceeds the total number of VFs the device supports, it returns |
3760 |  * failure. |
3761 | */ |
3762 | int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) |
3763 | { |
3764 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
3765 | int pre_existing_vfs, num_vfs, total_vfs, ret; |
3766 | |
3767 | ret = qm_pm_get_sync(qm); |
3768 | if (ret) |
3769 | return ret; |
3770 | |
3771 | 	total_vfs = pci_sriov_get_totalvfs(pdev); |
3772 | 	pre_existing_vfs = pci_num_vf(pdev); |
3773 | 	if (pre_existing_vfs) { |
3774 | 		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", |
3775 | 			pre_existing_vfs); |
3776 | 		goto err_put_sync; |
3777 | 	} |
3778 | |
3779 | 	if (max_vfs > total_vfs) { |
3780 | 		pci_err(pdev, "%d VFs requested, but only %d are supported!\n", max_vfs, total_vfs); |
3781 | ret = -ERANGE; |
3782 | goto err_put_sync; |
3783 | } |
3784 | |
3785 | 	num_vfs = max_vfs; |
3786 | |
3787 | 	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) |
3788 | 		hisi_qm_init_vf_qos(qm, num_vfs); |
3789 | |
3790 | 	ret = qm_vf_q_assign(qm, num_vfs); |
3791 | 	if (ret) { |
3792 | 		pci_err(pdev, "Can't assign queues for VF!\n"); |
3793 | 		goto err_put_sync; |
3794 | 	} |
3795 | |
3796 | 	qm->vfs_num = num_vfs; |
3797 | |
3798 | 	ret = pci_enable_sriov(pdev, num_vfs); |
3799 | 	if (ret) { |
3800 | 		pci_err(pdev, "Can't enable VF!\n"); |
3801 | 		qm_clear_vft_config(qm); |
3802 | 		goto err_put_sync; |
3803 | 	} |
3804 | |
3805 | 	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); |
3806 | |
3807 | return num_vfs; |
3808 | |
3809 | err_put_sync: |
3810 | qm_pm_put_sync(qm); |
3811 | return ret; |
3812 | } |
3813 | EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); |
3814 | |
3815 | /** |
3816 | * hisi_qm_sriov_disable - disable virtual functions |
3817 | * @pdev: the PCI device. |
3818 | * @is_frozen: true when all the VFs are frozen. |
3819 | * |
3820 |  * Return failure if any VF is already assigned or still in use. |
3821 | */ |
3822 | int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) |
3823 | { |
3824 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
3825 | int ret; |
3826 | |
3827 | 	if (pci_vfs_assigned(pdev)) { |
3828 | 		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); |
3829 | 		return -EPERM; |
3830 | 	} |
3831 | |
3832 | 	/* While any VF is in use, SR-IOV cannot be disabled. */ |
3833 | 	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { |
3834 | 		pci_err(pdev, "Task is using its VF!\n"); |
3835 | 		return -EBUSY; |
3836 | 	} |
3837 | |
3838 | 	pci_disable_sriov(pdev); |
3839 | |
3840 | ret = qm_clear_vft_config(qm); |
3841 | if (ret) |
3842 | return ret; |
3843 | |
3844 | qm_pm_put_sync(qm); |
3845 | |
3846 | return 0; |
3847 | } |
3848 | EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); |
3849 | |
3850 | /** |
3851 | * hisi_qm_sriov_configure - configure the number of VFs |
3852 | * @pdev: The PCI device |
3853 |  * @num_vfs: The number of VFs to enable |
3854 | * |
3855 | * Enable SR-IOV according to num_vfs, 0 means disable. |
3856 | */ |
3857 | int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) |
3858 | { |
3859 | if (num_vfs == 0) |
3860 | return hisi_qm_sriov_disable(pdev, false); |
3861 | else |
3862 | return hisi_qm_sriov_enable(pdev, num_vfs); |
3863 | } |
3864 | EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); |
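/*
 * hisi_qm_sriov_configure() is typically wired to a driver's .sriov_configure
 * callback, so userspace reaches it through the standard PCI sysfs knob,
 * e.g. (illustrative BDF):
 *   echo 3 > /sys/bus/pci/devices/0000:81:00.0/sriov_numvfs
 */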
3865 | |
3866 | static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) |
3867 | { |
3868 | u32 err_sts; |
3869 | |
3870 | if (!qm->err_ini->get_dev_hw_err_status) { |
3871 | 		dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n"); |
3872 | return ACC_ERR_NONE; |
3873 | } |
3874 | |
3875 | /* get device hardware error status */ |
3876 | err_sts = qm->err_ini->get_dev_hw_err_status(qm); |
3877 | if (err_sts) { |
3878 | if (err_sts & qm->err_info.ecc_2bits_mask) |
3879 | qm->err_status.is_dev_ecc_mbit = true; |
3880 | |
3881 | if (qm->err_ini->log_dev_hw_err) |
3882 | qm->err_ini->log_dev_hw_err(qm, err_sts); |
3883 | |
3884 | if (err_sts & qm->err_info.dev_reset_mask) |
3885 | return ACC_ERR_NEED_RESET; |
3886 | |
3887 | if (qm->err_ini->clear_dev_hw_err_status) |
3888 | qm->err_ini->clear_dev_hw_err_status(qm, err_sts); |
3889 | } |
3890 | |
3891 | return ACC_ERR_RECOVERED; |
3892 | } |
3893 | |
3894 | static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) |
3895 | { |
3896 | enum acc_err_result qm_ret, dev_ret; |
3897 | |
3898 | /* log qm error */ |
3899 | qm_ret = qm_hw_error_handle(qm); |
3900 | |
3901 | /* log device error */ |
3902 | dev_ret = qm_dev_err_handle(qm); |
3903 | |
3904 | return (qm_ret == ACC_ERR_NEED_RESET || |
3905 | dev_ret == ACC_ERR_NEED_RESET) ? |
3906 | ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; |
3907 | } |
3908 | |
3909 | /** |
3910 | * hisi_qm_dev_err_detected() - Get device and qm error status then log it. |
3911 |  * @pdev: The PCI device that needs to report the error. |
3912 |  * @state: The connectivity between CPU and device. |
3913 |  * |
3914 |  * We register this function into the PCIe AER handlers. It reports the |
3915 |  * device or qm hardware error status when an error occurs. |
3916 | */ |
3917 | pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, |
3918 | pci_channel_state_t state) |
3919 | { |
3920 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
3921 | enum acc_err_result ret; |
3922 | |
3923 | if (pdev->is_virtfn) |
3924 | return PCI_ERS_RESULT_NONE; |
3925 | |
3926 | 	pci_info(pdev, "PCI error detected, state(=%u)!\n", state); |
3927 | if (state == pci_channel_io_perm_failure) |
3928 | return PCI_ERS_RESULT_DISCONNECT; |
3929 | |
3930 | ret = qm_process_dev_error(qm); |
3931 | if (ret == ACC_ERR_NEED_RESET) |
3932 | return PCI_ERS_RESULT_NEED_RESET; |
3933 | |
3934 | return PCI_ERS_RESULT_RECOVERED; |
3935 | } |
3936 | EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); |
3937 | |
3938 | static int qm_check_req_recv(struct hisi_qm *qm) |
3939 | { |
3940 | struct pci_dev *pdev = qm->pdev; |
3941 | int ret; |
3942 | u32 val; |
3943 | |
3944 | if (qm->ver >= QM_HW_V3) |
3945 | return 0; |
3946 | |
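	/*
	 * On hardware before V3, drain outstanding requests by writing a test
	 * value to the vendor ID register and polling it back, then restoring
	 * the real vendor ID the same way.
	 */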
3947 | 	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); |
3948 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, |
3949 | (val == ACC_VENDOR_ID_VALUE), |
3950 | POLL_PERIOD, POLL_TIMEOUT); |
3951 | if (ret) { |
3952 | 		dev_err(&pdev->dev, "Failed to read QM reg!\n"); |
3953 | 		return ret; |
3954 | 	} |
3955 | |
3956 | 	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); |
3957 | 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, |
3958 | 					 (val == PCI_VENDOR_ID_HUAWEI), |
3959 | 					 POLL_PERIOD, POLL_TIMEOUT); |
3960 | 	if (ret) |
3961 | 		dev_err(&pdev->dev, "Failed to read QM reg on the second read!\n"); |
3962 | |
3963 | return ret; |
3964 | } |
3965 | |
3966 | static int qm_set_pf_mse(struct hisi_qm *qm, bool set) |
3967 | { |
3968 | struct pci_dev *pdev = qm->pdev; |
3969 | u16 cmd; |
3970 | int i; |
3971 | |
3972 | 	pci_read_config_word(pdev, PCI_COMMAND, &cmd); |
3973 | 	if (set) |
3974 | 		cmd |= PCI_COMMAND_MEMORY; |
3975 | 	else |
3976 | 		cmd &= ~PCI_COMMAND_MEMORY; |
3977 | |
3978 | 	pci_write_config_word(pdev, PCI_COMMAND, cmd); |
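	/*
	 * Poll until the write takes effect: PCI_COMMAND_MEMORY is bit 1 of
	 * the command register, so shifting it down by 1 yields 0 or 1 for
	 * comparison with the boolean 'set'.
	 */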
3979 | for (i = 0; i < MAX_WAIT_COUNTS; i++) { |
3980 | 		pci_read_config_word(pdev, PCI_COMMAND, &cmd); |
3981 | if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) |
3982 | return 0; |
3983 | |
3984 | udelay(1); |
3985 | } |
3986 | |
3987 | return -ETIMEDOUT; |
3988 | } |
3989 | |
3990 | static int qm_set_vf_mse(struct hisi_qm *qm, bool set) |
3991 | { |
3992 | struct pci_dev *pdev = qm->pdev; |
3993 | u16 sriov_ctrl; |
3994 | int pos; |
3995 | int i; |
3996 | |
3997 | /* |
3998 | * Since function qm_set_vf_mse is called only after SRIOV is enabled, |
3999 | * pci_find_ext_capability cannot return 0, pos does not need to be |
4000 | * checked. |
4001 | */ |
4002 | 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); |
4003 | 	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); |
4004 | 	if (set) |
4005 | 		sriov_ctrl |= PCI_SRIOV_CTRL_MSE; |
4006 | 	else |
4007 | 		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; |
4008 | 	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); |
4009 | |
4010 | 	for (i = 0; i < MAX_WAIT_COUNTS; i++) { |
4011 | 		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); |
4012 | if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> |
4013 | ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) |
4014 | return 0; |
4015 | |
4016 | udelay(1); |
4017 | } |
4018 | |
4019 | return -ETIMEDOUT; |
4020 | } |
4021 | |
4022 | static int qm_vf_reset_prepare(struct hisi_qm *qm, |
4023 | enum qm_stop_reason stop_reason) |
4024 | { |
4025 | struct hisi_qm_list *qm_list = qm->qm_list; |
4026 | struct pci_dev *pdev = qm->pdev; |
4027 | struct pci_dev *virtfn; |
4028 | struct hisi_qm *vf_qm; |
4029 | int ret = 0; |
4030 | |
4031 | mutex_lock(&qm_list->lock); |
4032 | list_for_each_entry(vf_qm, &qm_list->list, list) { |
4033 | virtfn = vf_qm->pdev; |
4034 | if (virtfn == pdev) |
4035 | continue; |
4036 | |
4037 | 		if (pci_physfn(virtfn) == pdev) { |
4038 | 			/* save VFs PCIe BAR configuration */ |
4039 | 			pci_save_state(virtfn); |
4040 | |
4041 | ret = hisi_qm_stop(vf_qm, stop_reason); |
4042 | if (ret) |
4043 | goto stop_fail; |
4044 | } |
4045 | } |
4046 | |
4047 | stop_fail: |
4048 | 	mutex_unlock(&qm_list->lock); |
4049 | return ret; |
4050 | } |
4051 | |
4052 | static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, |
4053 | enum qm_stop_reason stop_reason) |
4054 | { |
4055 | struct pci_dev *pdev = qm->pdev; |
4056 | int ret; |
4057 | |
4058 | if (!qm->vfs_num) |
4059 | return 0; |
4060 | |
4061 | 	/* Kunpeng930 supports notifying VFs to stop before PF reset */ |
4062 | 	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { |
4063 | 		ret = qm_ping_all_vfs(qm, cmd); |
4064 | 		if (ret) |
4065 | 			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); |
4066 | 	} else { |
4067 | 		ret = qm_vf_reset_prepare(qm, stop_reason); |
4068 | 		if (ret) |
4069 | 			pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); |
4070 | } |
4071 | |
4072 | return ret; |
4073 | } |
4074 | |
4075 | static int qm_controller_reset_prepare(struct hisi_qm *qm) |
4076 | { |
4077 | struct pci_dev *pdev = qm->pdev; |
4078 | int ret; |
4079 | |
4080 | 	ret = qm_reset_prepare_ready(qm); |
4081 | 	if (ret) { |
4082 | 		pci_err(pdev, "Controller reset not ready!\n"); |
4083 | 		return ret; |
4084 | 	} |
4085 | |
4086 | 	/* PF obtains the information of VF by querying the register. */ |
4087 | 	qm_cmd_uninit(qm); |
4088 | |
4089 | 	/* Whether or not the VFs stop successfully, the soft reset continues. */ |
4090 | 	ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); |
4091 | 	if (ret) |
4092 | 		pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); |
4093 | |
4094 | 	ret = hisi_qm_stop(qm, QM_SOFT_RESET); |
4095 | 	if (ret) { |
4096 | 		pci_err(pdev, "Failed to stop QM!\n"); |
4097 | qm_reset_bit_clear(qm); |
4098 | return ret; |
4099 | } |
4100 | |
4101 | 	if (qm->use_sva) { |
4102 | 		ret = qm_hw_err_isolate(qm); |
4103 | 		if (ret) |
4104 | 			pci_err(pdev, "failed to isolate hw err!\n"); |
4105 | 	} |
4106 | |
4107 | 	ret = qm_wait_vf_prepare_finish(qm); |
4108 | 	if (ret) |
4109 | 		pci_err(pdev, "failed to stop by vfs in soft reset!\n"); |
4110 | |
4111 | 	clear_bit(QM_RST_SCHED, &qm->misc_ctl); |
4112 | |
4113 | return 0; |
4114 | } |
4115 | |
4116 | static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) |
4117 | { |
4118 | u32 nfe_enb = 0; |
4119 | |
4120 | 	/* Kunpeng930 hardware automatically closes master ooo when NFE occurs */ |
4121 | if (qm->ver >= QM_HW_V3) |
4122 | return; |
4123 | |
4124 | if (!qm->err_status.is_dev_ecc_mbit && |
4125 | qm->err_status.is_qm_ecc_mbit && |
4126 | qm->err_ini->close_axi_master_ooo) { |
4127 | qm->err_ini->close_axi_master_ooo(qm); |
4128 | } else if (qm->err_status.is_dev_ecc_mbit && |
4129 | !qm->err_status.is_qm_ecc_mbit && |
4130 | !qm->err_ini->close_axi_master_ooo) { |
4131 | 		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); |
4132 | 		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, |
4133 | 		       qm->io_base + QM_RAS_NFE_ENABLE); |
4134 | 		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); |
4135 | } |
4136 | } |
4137 | |
4138 | static int qm_soft_reset(struct hisi_qm *qm) |
4139 | { |
4140 | struct pci_dev *pdev = qm->pdev; |
4141 | int ret; |
4142 | u32 val; |
4143 | |
4144 | /* Ensure all doorbells and mailboxes received by QM */ |
4145 | ret = qm_check_req_recv(qm); |
4146 | if (ret) |
4147 | return ret; |
4148 | |
4149 | if (qm->vfs_num) { |
4150 | 		ret = qm_set_vf_mse(qm, false); |
4151 | 		if (ret) { |
4152 | 			pci_err(pdev, "Failed to disable vf MSE bit.\n"); |
4153 | 			return ret; |
4154 | 		} |
4155 | 	} |
4156 | |
4157 | 	ret = qm->ops->set_msi(qm, false); |
4158 | 	if (ret) { |
4159 | 		pci_err(pdev, "Failed to disable PEH MSI bit.\n"); |
4160 | return ret; |
4161 | } |
4162 | |
4163 | qm_dev_ecc_mbit_handle(qm); |
4164 | |
4165 | /* OOO register set and check */ |
4166 | 	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, |
4167 | 	       qm->io_base + ACC_MASTER_GLOBAL_CTRL); |
4168 | |
4169 | /* If bus lock, reset chip */ |
4170 | ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, |
4171 | val, |
4172 | (val == ACC_MASTER_TRANS_RETURN_RW), |
4173 | POLL_PERIOD, POLL_TIMEOUT); |
4174 | if (ret) { |
4175 | 		pci_emerg(pdev, "Bus lock! Please reset system.\n"); |
4176 | return ret; |
4177 | } |
4178 | |
4179 | if (qm->err_ini->close_sva_prefetch) |
4180 | qm->err_ini->close_sva_prefetch(qm); |
4181 | |
4182 | 	ret = qm_set_pf_mse(qm, false); |
4183 | 	if (ret) { |
4184 | 		pci_err(pdev, "Failed to disable pf MSE bit.\n"); |
4185 | return ret; |
4186 | } |
4187 | |
4188 | /* The reset related sub-control registers are not in PCI BAR */ |
4189 | if (ACPI_HANDLE(&pdev->dev)) { |
4190 | unsigned long long value = 0; |
4191 | acpi_status s; |
4192 | |
4193 | 		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), |
4194 | 					  qm->err_info.acpi_rst, |
4195 | 					  NULL, &value); |
4196 | 		if (ACPI_FAILURE(s)) { |
4197 | 			pci_err(pdev, "NO controller reset method!\n"); |
4198 | 			return -EIO; |
4199 | 		} |
4200 | |
4201 | 		if (value) { |
4202 | 			pci_err(pdev, "Reset step %llu failed!\n", value); |
4203 | 			return -EIO; |
4204 | 		} |
4205 | 	} else { |
4206 | 		pci_err(pdev, "No reset method!\n"); |
4207 | return -EINVAL; |
4208 | } |
4209 | |
4210 | return 0; |
4211 | } |
4212 | |
4213 | static int qm_vf_reset_done(struct hisi_qm *qm) |
4214 | { |
4215 | struct hisi_qm_list *qm_list = qm->qm_list; |
4216 | struct pci_dev *pdev = qm->pdev; |
4217 | struct pci_dev *virtfn; |
4218 | struct hisi_qm *vf_qm; |
4219 | int ret = 0; |
4220 | |
4221 | mutex_lock(&qm_list->lock); |
4222 | list_for_each_entry(vf_qm, &qm_list->list, list) { |
4223 | virtfn = vf_qm->pdev; |
4224 | if (virtfn == pdev) |
4225 | continue; |
4226 | |
4227 | 		if (pci_physfn(virtfn) == pdev) { |
4228 | 			/* restore VFs PCIe BAR configuration */ |
4229 | 			pci_restore_state(virtfn); |
4230 | |
4231 | 			ret = qm_restart(vf_qm); |
4232 | 			if (ret) |
4233 | 				goto restart_fail; |
4234 | 		} |
4235 | 	} |
4236 | |
4237 | restart_fail: |
4238 | 	mutex_unlock(&qm_list->lock); |
4239 | return ret; |
4240 | } |
4241 | |
4242 | static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) |
4243 | { |
4244 | struct pci_dev *pdev = qm->pdev; |
4245 | int ret; |
4246 | |
4247 | if (!qm->vfs_num) |
4248 | return 0; |
4249 | |
4250 | 	ret = qm_vf_q_assign(qm, qm->vfs_num); |
4251 | 	if (ret) { |
4252 | 		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); |
4253 | 		return ret; |
4254 | 	} |
4255 | |
4256 | 	/* Kunpeng930 supports notifying VFs to start after PF reset. */ |
4257 | 	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { |
4258 | 		ret = qm_ping_all_vfs(qm, cmd); |
4259 | 		if (ret) |
4260 | 			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); |
4261 | 	} else { |
4262 | 		ret = qm_vf_reset_done(qm); |
4263 | 		if (ret) |
4264 | 			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); |
4265 | } |
4266 | |
4267 | return ret; |
4268 | } |
4269 | |
4270 | static int qm_dev_hw_init(struct hisi_qm *qm) |
4271 | { |
4272 | return qm->err_ini->hw_init(qm); |
4273 | } |
4274 | |
4275 | static void qm_restart_prepare(struct hisi_qm *qm) |
4276 | { |
4277 | u32 value; |
4278 | |
4279 | if (qm->err_ini->open_sva_prefetch) |
4280 | qm->err_ini->open_sva_prefetch(qm); |
4281 | |
4282 | if (qm->ver >= QM_HW_V3) |
4283 | return; |
4284 | |
4285 | if (!qm->err_status.is_qm_ecc_mbit && |
4286 | !qm->err_status.is_dev_ecc_mbit) |
4287 | return; |
4288 | |
4289 | /* temporarily close the OOO port used for PEH to write out MSI */ |
4290 | 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); |
4291 | 	writel(value & ~qm->err_info.msi_wr_port, |
4292 | 	       qm->io_base + ACC_AM_CFG_PORT_WR_EN); |
4293 | |
4294 | 	/* clear dev ecc 2bit error source if present */ |
4295 | 	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; |
4296 | 	if (value && qm->err_ini->clear_dev_hw_err_status) |
4297 | 		qm->err_ini->clear_dev_hw_err_status(qm, value); |
4298 | |
4299 | 	/* clear QM ecc mbit error source */ |
4300 | 	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); |
4301 | |
4302 | 	/* clear AM Reorder Buffer ecc mbit source */ |
4303 | 	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); |
4304 | } |
4305 | |
4306 | static void qm_restart_done(struct hisi_qm *qm) |
4307 | { |
4308 | u32 value; |
4309 | |
4310 | if (qm->ver >= QM_HW_V3) |
4311 | goto clear_flags; |
4312 | |
4313 | if (!qm->err_status.is_qm_ecc_mbit && |
4314 | !qm->err_status.is_dev_ecc_mbit) |
4315 | return; |
4316 | |
4317 | /* open the OOO port for PEH to write out MSI */ |
4318 | 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); |
4319 | 	value |= qm->err_info.msi_wr_port; |
4320 | 	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); |
4321 | |
4322 | clear_flags: |
4323 | qm->err_status.is_qm_ecc_mbit = false; |
4324 | qm->err_status.is_dev_ecc_mbit = false; |
4325 | } |
4326 | |
4327 | static int qm_controller_reset_done(struct hisi_qm *qm) |
4328 | { |
4329 | struct pci_dev *pdev = qm->pdev; |
4330 | int ret; |
4331 | |
4332 | 	ret = qm->ops->set_msi(qm, true); |
4333 | 	if (ret) { |
4334 | 		pci_err(pdev, "Failed to enable PEH MSI bit!\n"); |
4335 | 		return ret; |
4336 | 	} |
4337 | |
4338 | 	ret = qm_set_pf_mse(qm, true); |
4339 | 	if (ret) { |
4340 | 		pci_err(pdev, "Failed to enable pf MSE bit!\n"); |
4341 | 		return ret; |
4342 | 	} |
4343 | |
4344 | 	if (qm->vfs_num) { |
4345 | 		ret = qm_set_vf_mse(qm, true); |
4346 | 		if (ret) { |
4347 | 			pci_err(pdev, "Failed to enable vf MSE bit!\n"); |
4348 | return ret; |
4349 | } |
4350 | } |
4351 | |
4352 | 	ret = qm_dev_hw_init(qm); |
4353 | 	if (ret) { |
4354 | 		pci_err(pdev, "Failed to init device\n"); |
4355 | 		return ret; |
4356 | 	} |
4357 | |
4358 | 	qm_restart_prepare(qm); |
4359 | 	hisi_qm_dev_err_init(qm); |
4360 | 	if (qm->err_ini->open_axi_master_ooo) |
4361 | 		qm->err_ini->open_axi_master_ooo(qm); |
4362 | |
4363 | 	ret = qm_dev_mem_reset(qm); |
4364 | 	if (ret) { |
4365 | 		pci_err(pdev, "failed to reset device memory\n"); |
4366 | 		return ret; |
4367 | 	} |
4368 | |
4369 | 	ret = qm_restart(qm); |
4370 | 	if (ret) { |
4371 | 		pci_err(pdev, "Failed to start QM!\n"); |
4372 | 		return ret; |
4373 | 	} |
4374 | |
4375 | 	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); |
4376 | 	if (ret) |
4377 | 		pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); |
4378 | |
4379 | 	ret = qm_wait_vf_prepare_finish(qm); |
4380 | 	if (ret) |
4381 | 		pci_err(pdev, "failed to start by vfs in soft reset!\n"); |
4382 | |
4383 | qm_cmd_init(qm); |
4384 | qm_restart_done(qm); |
4385 | |
4386 | qm_reset_bit_clear(qm); |
4387 | |
4388 | return 0; |
4389 | } |
4390 | |
4391 | static int qm_controller_reset(struct hisi_qm *qm) |
4392 | { |
4393 | struct pci_dev *pdev = qm->pdev; |
4394 | int ret; |
4395 | |
4396 | 	pci_info(pdev, "Controller resetting...\n"); |
4397 | |
4398 | 	ret = qm_controller_reset_prepare(qm); |
4399 | 	if (ret) { |
4400 | 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); |
4401 | 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); |
4402 | 		clear_bit(QM_RST_SCHED, &qm->misc_ctl); |
4403 | return ret; |
4404 | } |
4405 | |
4406 | hisi_qm_show_last_dfx_regs(qm); |
4407 | if (qm->err_ini->show_last_dfx_regs) |
4408 | qm->err_ini->show_last_dfx_regs(qm); |
4409 | |
4410 | ret = qm_soft_reset(qm); |
4411 | if (ret) |
4412 | goto err_reset; |
4413 | |
4414 | ret = qm_controller_reset_done(qm); |
4415 | if (ret) |
4416 | goto err_reset; |
4417 | |
4418 | 	pci_info(pdev, "Controller reset complete\n"); |
4419 | |
4420 | 	return 0; |
4421 | |
4422 | err_reset: |
4423 | 	pci_err(pdev, "Controller reset failed (%d)\n", ret); |
4424 | qm_reset_bit_clear(qm); |
4425 | |
4426 | /* if resetting fails, isolate the device */ |
4427 | if (qm->use_sva) |
4428 | qm->isolate_data.is_isolate = true; |
4429 | return ret; |
4430 | } |
4431 | |
4432 | /** |
4433 | * hisi_qm_dev_slot_reset() - slot reset |
4434 | * @pdev: the PCIe device |
4435 | * |
4436 |  * This function offers the QM-related PCIe device reset interface. |
4437 |  * Drivers using QM can use it as slot_reset in struct pci_error_handlers. |
4438 | */ |
4439 | pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) |
4440 | { |
4441 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
4442 | int ret; |
4443 | |
4444 | if (pdev->is_virtfn) |
4445 | return PCI_ERS_RESULT_RECOVERED; |
4446 | |
4447 | 	/* reset pcie device controller */ |
4448 | 	ret = qm_controller_reset(qm); |
4449 | 	if (ret) { |
4450 | 		pci_err(pdev, "Controller reset failed (%d)\n", ret); |
4451 | return PCI_ERS_RESULT_DISCONNECT; |
4452 | } |
4453 | |
4454 | return PCI_ERS_RESULT_RECOVERED; |
4455 | } |
4456 | EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); |
4457 | |
4458 | void hisi_qm_reset_prepare(struct pci_dev *pdev) |
4459 | { |
4460 | 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); |
4461 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
4462 | u32 delay = 0; |
4463 | int ret; |
4464 | |
4465 | hisi_qm_dev_err_uninit(pf_qm); |
4466 | |
4467 | /* |
4468 | 	 * Check whether there is an ECC mbit error. If there is, wait for the |
4469 | 	 * soft reset to fix it. |
4470 | 	 */ |
4471 | 	while (qm_check_dev_error(pf_qm)) { |
4472 | 		msleep(++delay); |
4473 | if (delay > QM_RESET_WAIT_TIMEOUT) |
4474 | return; |
4475 | } |
4476 | |
4477 | 	ret = qm_reset_prepare_ready(qm); |
4478 | 	if (ret) { |
4479 | 		pci_err(pdev, "FLR not ready!\n"); |
4480 | return; |
4481 | } |
4482 | |
4483 | /* PF obtains the information of VF by querying the register. */ |
4484 | if (qm->fun_type == QM_HW_PF) |
4485 | qm_cmd_uninit(qm); |
4486 | |
4487 | 	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); |
4488 | 	if (ret) |
4489 | 		pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); |
4490 | |
4491 | 	ret = hisi_qm_stop(qm, QM_DOWN); |
4492 | 	if (ret) { |
4493 | 		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); |
4494 | 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); |
4495 | 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); |
4496 | 		return; |
4497 | 	} |
4498 | |
4499 | 	ret = qm_wait_vf_prepare_finish(qm); |
4500 | 	if (ret) |
4501 | 		pci_err(pdev, "failed to stop by vfs in FLR!\n"); |
4502 | |
4503 | 	pci_info(pdev, "FLR resetting...\n"); |
4504 | } |
4505 | EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); |
4506 | |
4507 | static bool qm_flr_reset_complete(struct pci_dev *pdev) |
4508 | { |
4509 | 	struct pci_dev *pf_pdev = pci_physfn(pdev); |
4510 | 	struct hisi_qm *qm = pci_get_drvdata(pf_pdev); |
4511 | u32 id; |
4512 | |
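	/*
	 * A config read returning QM_PCI_COMMAND_INVALID (the all-ones
	 * pattern a read typically returns when a device has dropped off the
	 * bus) means the device did not come back from the FLR.
	 */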
4513 | 	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); |
4514 | 	if (id == QM_PCI_COMMAND_INVALID) { |
4515 | 		pci_err(pdev, "Device cannot be used!\n"); |
4516 | return false; |
4517 | } |
4518 | |
4519 | return true; |
4520 | } |
4521 | |
4522 | void hisi_qm_reset_done(struct pci_dev *pdev) |
4523 | { |
4524 | 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); |
4525 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
4526 | int ret; |
4527 | |
4528 | 	if (qm->fun_type == QM_HW_PF) { |
4529 | 		ret = qm_dev_hw_init(qm); |
4530 | 		if (ret) { |
4531 | 			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); |
4532 | goto flr_done; |
4533 | } |
4534 | } |
4535 | |
4536 | hisi_qm_dev_err_init(pf_qm); |
4537 | |
4538 | 	ret = qm_restart(qm); |
4539 | 	if (ret) { |
4540 | 		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); |
4541 | 		goto flr_done; |
4542 | 	} |
4543 | |
4544 | 	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); |
4545 | 	if (ret) |
4546 | 		pci_err(pdev, "failed to start vfs by pf in FLR.\n"); |
4547 | |
4548 | 	ret = qm_wait_vf_prepare_finish(qm); |
4549 | 	if (ret) |
4550 | 		pci_err(pdev, "failed to start by vfs in FLR!\n"); |
4551 | |
4552 | flr_done: |
4553 | if (qm->fun_type == QM_HW_PF) |
4554 | qm_cmd_init(qm); |
4555 | |
4556 | if (qm_flr_reset_complete(pdev)) |
4557 | 		pci_info(pdev, "FLR reset complete\n"); |
4558 | |
4559 | qm_reset_bit_clear(qm); |
4560 | } |
4561 | EXPORT_SYMBOL_GPL(hisi_qm_reset_done); |
4562 | |
4563 | static irqreturn_t qm_abnormal_irq(int irq, void *data) |
4564 | { |
4565 | struct hisi_qm *qm = data; |
4566 | enum acc_err_result ret; |
4567 | |
4568 | 	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); |
4569 | 	ret = qm_process_dev_error(qm); |
4570 | 	if (ret == ACC_ERR_NEED_RESET && |
4571 | 	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && |
4572 | 	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) |
4573 | 		schedule_work(&qm->rst_work); |
4574 | |
4575 | return IRQ_HANDLED; |
4576 | } |
4577 | |
4578 | /** |
4579 | * hisi_qm_dev_shutdown() - Shutdown device. |
4580 |  * @pdev: The device to be shut down. |
4581 |  * |
4582 |  * This function stops the qm when the OS shuts down or reboots. |
4583 | */ |
4584 | void hisi_qm_dev_shutdown(struct pci_dev *pdev) |
4585 | { |
4586 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
4587 | int ret; |
4588 | |
4589 | 	ret = hisi_qm_stop(qm, QM_DOWN); |
4590 | 	if (ret) |
4591 | 		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); |
4592 | |
4593 | hisi_qm_cache_wb(qm); |
4594 | } |
4595 | EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); |
4596 | |
4597 | static void hisi_qm_controller_reset(struct work_struct *rst_work) |
4598 | { |
4599 | struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); |
4600 | int ret; |
4601 | |
4602 | 	ret = qm_pm_get_sync(qm); |
4603 | 	if (ret) { |
4604 | 		clear_bit(QM_RST_SCHED, &qm->misc_ctl); |
4605 | 		return; |
4606 | 	} |
4607 | |
4608 | 	/* reset pcie device controller */ |
4609 | 	ret = qm_controller_reset(qm); |
4610 | 	if (ret) |
4611 | 		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); |
4612 | |
4613 | qm_pm_put_sync(qm); |
4614 | } |
4615 | |
4616 | static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, |
4617 | enum qm_stop_reason stop_reason) |
4618 | { |
4619 | enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; |
4620 | struct pci_dev *pdev = qm->pdev; |
4621 | int ret; |
4622 | |
4623 | 	ret = qm_reset_prepare_ready(qm); |
4624 | 	if (ret) { |
4625 | 		dev_err(&pdev->dev, "reset prepare not ready!\n"); |
4626 | 		atomic_set(&qm->status.flags, QM_STOP); |
4627 | 		cmd = QM_VF_PREPARE_FAIL; |
4628 | goto err_prepare; |
4629 | } |
4630 | |
4631 | ret = hisi_qm_stop(qm, stop_reason); |
4632 | if (ret) { |
4633 | 		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); |
4634 | 		atomic_set(&qm->status.flags, QM_STOP); |
4635 | cmd = QM_VF_PREPARE_FAIL; |
4636 | goto err_prepare; |
4637 | } else { |
4638 | goto out; |
4639 | } |
4640 | |
4641 | err_prepare: |
4642 | hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); |
4643 | hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); |
4644 | out: |
4645 | 	pci_save_state(pdev); |
4646 | 	ret = qm_ping_pf(qm, cmd); |
4647 | 	if (ret) |
4648 | 		dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n"); |
4649 | } |
4650 | |
4651 | static void qm_pf_reset_vf_done(struct hisi_qm *qm) |
4652 | { |
4653 | enum qm_mb_cmd cmd = QM_VF_START_DONE; |
4654 | struct pci_dev *pdev = qm->pdev; |
4655 | int ret; |
4656 | |
4657 | 	pci_restore_state(pdev); |
4658 | 	ret = hisi_qm_start(qm); |
4659 | 	if (ret) { |
4660 | 		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); |
4661 | 		cmd = QM_VF_START_FAIL; |
4662 | 	} |
4663 | |
4664 | 	qm_cmd_init(qm); |
4665 | 	ret = qm_ping_pf(qm, cmd); |
4666 | 	if (ret) |
4667 | 		dev_warn(&pdev->dev, "PF response timed out in reset done!\n"); |
4668 | |
4669 | qm_reset_bit_clear(qm); |
4670 | } |
4671 | |
4672 | static int qm_wait_pf_reset_finish(struct hisi_qm *qm) |
4673 | { |
4674 | struct device *dev = &qm->pdev->dev; |
4675 | u32 val, cmd; |
4676 | u64 msg; |
4677 | int ret; |
4678 | |
4679 | /* Wait for reset to finish */ |
4680 | ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, |
4681 | val == BIT(0), QM_VF_RESET_WAIT_US, |
4682 | QM_VF_RESET_WAIT_TIMEOUT_US); |
4683 | /* hardware completion status should be available by this time */ |
4684 | if (ret) { |
4685 | 		dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); |
4686 | return -ETIMEDOUT; |
4687 | } |
4688 | |
4689 | /* |
4690 | 	 * Whether or not the message is fetched successfully, the VF must ack |
4691 | 	 * the PF by clearing the interrupt. |
4692 | 	 */ |
4693 | 	ret = qm_get_mb_cmd(qm, &msg, 0); |
4694 | 	qm_clear_cmd_interrupt(qm, 0); |
4695 | 	if (ret) { |
4696 | 		dev_err(dev, "failed to get msg from PF in reset done!\n"); |
4697 | return ret; |
4698 | } |
4699 | |
4700 | cmd = msg & QM_MB_CMD_DATA_MASK; |
4701 | if (cmd != QM_PF_RESET_DONE) { |
4702 | 		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); |
4703 | ret = -EINVAL; |
4704 | } |
4705 | |
4706 | return ret; |
4707 | } |
4708 | |
4709 | static void qm_pf_reset_vf_process(struct hisi_qm *qm, |
4710 | enum qm_stop_reason stop_reason) |
4711 | { |
4712 | struct device *dev = &qm->pdev->dev; |
4713 | int ret; |
4714 | |
4715 | 	dev_info(dev, "device reset start...\n"); |
4716 | |
4717 | /* The message is obtained by querying the register during resetting */ |
4718 | qm_cmd_uninit(qm); |
4719 | qm_pf_reset_vf_prepare(qm, stop_reason); |
4720 | |
4721 | ret = qm_wait_pf_reset_finish(qm); |
4722 | if (ret) |
4723 | goto err_get_status; |
4724 | |
4725 | qm_pf_reset_vf_done(qm); |
4726 | |
4727 | 	dev_info(dev, "device reset done.\n"); |
4728 | |
4729 | return; |
4730 | |
4731 | err_get_status: |
4732 | qm_cmd_init(qm); |
4733 | qm_reset_bit_clear(qm); |
4734 | } |
4735 | |
4736 | static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) |
4737 | { |
4738 | struct device *dev = &qm->pdev->dev; |
4739 | u64 msg; |
4740 | u32 cmd; |
4741 | int ret; |
4742 | |
4743 | /* |
4744 | 	 * Get the msg from the source via mailbox. Whether or not the msg is |
4745 | 	 * fetched successfully, the destination must ack the source by clearing the interrupt. |
4746 | 	 */ |
4747 | 	ret = qm_get_mb_cmd(qm, &msg, fun_num); |
4748 | 	qm_clear_cmd_interrupt(qm, BIT(fun_num)); |
4749 | 	if (ret) { |
4750 | 		dev_err(dev, "failed to get msg from source!\n"); |
4751 | return; |
4752 | } |
4753 | |
4754 | cmd = msg & QM_MB_CMD_DATA_MASK; |
4755 | switch (cmd) { |
4756 | 	case QM_PF_FLR_PREPARE: |
4757 | 		qm_pf_reset_vf_process(qm, QM_DOWN); |
4758 | 		break; |
4759 | 	case QM_PF_SRST_PREPARE: |
4760 | 		qm_pf_reset_vf_process(qm, QM_SOFT_RESET); |
4761 | 		break; |
4762 | case QM_VF_GET_QOS: |
4763 | qm_vf_get_qos(qm, fun_num); |
4764 | break; |
4765 | case QM_PF_SET_QOS: |
4766 | qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; |
4767 | break; |
4768 | default: |
4769 | 		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); |
4770 | break; |
4771 | } |
4772 | } |
4773 | |
4774 | static void qm_cmd_process(struct work_struct *cmd_process) |
4775 | { |
4776 | struct hisi_qm *qm = container_of(cmd_process, |
4777 | struct hisi_qm, cmd_process); |
4778 | u32 vfs_num = qm->vfs_num; |
4779 | u64 val; |
4780 | u32 i; |
4781 | |
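	/*
	 * On the PF, each set bit i in QM_IFC_INT_SOURCE_P marks a pending
	 * message from VF i; a VF only ever receives messages from the PF
	 * (function 0).
	 */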
4782 | if (qm->fun_type == QM_HW_PF) { |
4783 | 		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); |
4784 | 		if (!val) |
4785 | 			return; |
4786 | |
4787 | 		for (i = 1; i <= vfs_num; i++) { |
4788 | 			if (val & BIT(i)) |
4789 | 				qm_handle_cmd_msg(qm, i); |
4790 | 		} |
4791 | |
4792 | 		return; |
4793 | 	} |
4794 | |
4795 | 	qm_handle_cmd_msg(qm, 0); |
4796 | } |
4797 | |
4798 | /** |
4799 | * hisi_qm_alg_register() - Register alg to crypto. |
4800 |  * @qm: The qm to register. |
4801 |  * @qm_list: The qm list. |
4802 |  * @guard: Guard of qp_num. |
4803 |  * |
4804 |  * Register the algorithm to crypto when the function satisfies the guard. |
4805 | */ |
4806 | int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) |
4807 | { |
4808 | struct device *dev = &qm->pdev->dev; |
4809 | |
4810 | 	if (qm->ver <= QM_HW_V2 && qm->use_sva) { |
4811 | 		dev_info(dev, "HW V2 cannot use uacce sva mode and hardware crypto algs together.\n"); |
4812 | 		return 0; |
4813 | 	} |
4814 | |
4815 | 	if (qm->qp_num < guard) { |
4816 | 		dev_info(dev, "qp_num is less than the task needs.\n"); |
4817 | return 0; |
4818 | } |
4819 | |
4820 | return qm_list->register_to_crypto(qm); |
4821 | } |
4822 | EXPORT_SYMBOL_GPL(hisi_qm_alg_register); |
4823 | |
4824 | /** |
4825 | * hisi_qm_alg_unregister() - Unregister alg from crypto. |
4826 |  * @qm: The qm to unregister. |
4827 |  * @qm_list: The qm list. |
4828 |  * @guard: Guard of qp_num. |
4829 |  * |
4830 |  * Unregister the algorithm from crypto when the last function satisfies the guard. |
4831 | */ |
4832 | void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) |
4833 | { |
4834 | if (qm->ver <= QM_HW_V2 && qm->use_sva) |
4835 | return; |
4836 | |
4837 | if (qm->qp_num < guard) |
4838 | return; |
4839 | |
4840 | qm_list->unregister_from_crypto(qm); |
4841 | } |
4842 | EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); |
4843 | |
4844 | static void qm_unregister_abnormal_irq(struct hisi_qm *qm) |
4845 | { |
4846 | struct pci_dev *pdev = qm->pdev; |
4847 | u32 irq_vector, val; |
4848 | |
4849 | if (qm->fun_type == QM_HW_VF) |
4850 | return; |
4851 | |
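	/*
	 * The cached capability word packs the irq vector in its low bits
	 * (QM_IRQ_VECTOR_MASK) and the irq type above QM_IRQ_TYPE_SHIFT; a
	 * zero type means the irq is not implemented, so there is nothing to
	 * free. The same decoding is used by all the irq helpers below.
	 */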
4852 | val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; |
4853 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) |
4854 | return; |
4855 | |
4856 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4857 | 	free_irq(pci_irq_vector(pdev, irq_vector), qm); |
4858 | } |
4859 | |
4860 | static int qm_register_abnormal_irq(struct hisi_qm *qm) |
4861 | { |
4862 | struct pci_dev *pdev = qm->pdev; |
4863 | u32 irq_vector, val; |
4864 | int ret; |
4865 | |
4866 | if (qm->fun_type == QM_HW_VF) |
4867 | return 0; |
4868 | |
4869 | val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; |
4870 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) |
4871 | return 0; |
4872 | |
4873 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4874 | 	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); |
4875 | 	if (ret) |
4876 | 		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); |
4877 | |
4878 | return ret; |
4879 | } |
4880 | |
4881 | static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) |
4882 | { |
4883 | struct pci_dev *pdev = qm->pdev; |
4884 | u32 irq_vector, val; |
4885 | |
4886 | val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; |
4887 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4888 | return; |
4889 | |
4890 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4891 | 	free_irq(pci_irq_vector(pdev, irq_vector), qm); |
4892 | } |
4893 | |
4894 | static int qm_register_mb_cmd_irq(struct hisi_qm *qm) |
4895 | { |
4896 | struct pci_dev *pdev = qm->pdev; |
4897 | u32 irq_vector, val; |
4898 | int ret; |
4899 | |
4900 | val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; |
4901 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4902 | return 0; |
4903 | |
4904 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4905 | 	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); |
4906 | 	if (ret) |
4907 | 		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); |
4908 | |
4909 | return ret; |
4910 | } |
4911 | |
4912 | static void qm_unregister_aeq_irq(struct hisi_qm *qm) |
4913 | { |
4914 | struct pci_dev *pdev = qm->pdev; |
4915 | u32 irq_vector, val; |
4916 | |
4917 | val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; |
4918 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4919 | return; |
4920 | |
4921 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4922 | 	free_irq(pci_irq_vector(pdev, irq_vector), qm); |
4923 | } |
4924 | |
4925 | static int qm_register_aeq_irq(struct hisi_qm *qm) |
4926 | { |
4927 | struct pci_dev *pdev = qm->pdev; |
4928 | u32 irq_vector, val; |
4929 | int ret; |
4930 | |
4931 | val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; |
4932 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4933 | return 0; |
4934 | |
4935 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4936 | 	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, |
4937 | 				   qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); |
4938 | 	if (ret) |
4939 | 		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); |
4940 | |
4941 | return ret; |
4942 | } |
4943 | |
4944 | static void qm_unregister_eq_irq(struct hisi_qm *qm) |
4945 | { |
4946 | struct pci_dev *pdev = qm->pdev; |
4947 | u32 irq_vector, val; |
4948 | |
4949 | val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; |
4950 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4951 | return; |
4952 | |
4953 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4954 | 	free_irq(pci_irq_vector(pdev, irq_vector), qm); |
4955 | } |
4956 | |
4957 | static int qm_register_eq_irq(struct hisi_qm *qm) |
4958 | { |
4959 | struct pci_dev *pdev = qm->pdev; |
4960 | u32 irq_vector, val; |
4961 | int ret; |
4962 | |
4963 | val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; |
4964 | if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) |
4965 | return 0; |
4966 | |
4967 | irq_vector = val & QM_IRQ_VECTOR_MASK; |
4968 | 	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); |
4969 | 	if (ret) |
4970 | 		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); |
4971 | |
4972 | return ret; |
4973 | } |
4974 | |
4975 | static void qm_irqs_unregister(struct hisi_qm *qm) |
4976 | { |
4977 | qm_unregister_mb_cmd_irq(qm); |
4978 | qm_unregister_abnormal_irq(qm); |
4979 | qm_unregister_aeq_irq(qm); |
4980 | qm_unregister_eq_irq(qm); |
4981 | } |
4982 | |
4983 | static int qm_irqs_register(struct hisi_qm *qm) |
4984 | { |
4985 | int ret; |
4986 | |
4987 | ret = qm_register_eq_irq(qm); |
4988 | if (ret) |
4989 | return ret; |
4990 | |
4991 | ret = qm_register_aeq_irq(qm); |
4992 | if (ret) |
4993 | goto free_eq_irq; |
4994 | |
4995 | ret = qm_register_abnormal_irq(qm); |
4996 | if (ret) |
4997 | goto free_aeq_irq; |
4998 | |
4999 | ret = qm_register_mb_cmd_irq(qm); |
5000 | if (ret) |
5001 | goto free_abnormal_irq; |
5002 | |
5003 | return 0; |
5004 | |
5005 | free_abnormal_irq: |
5006 | qm_unregister_abnormal_irq(qm); |
5007 | free_aeq_irq: |
5008 | qm_unregister_aeq_irq(qm); |
5009 | free_eq_irq: |
5010 | qm_unregister_eq_irq(qm); |
5011 | return ret; |
5012 | } |
5013 | |
5014 | static int qm_get_qp_num(struct hisi_qm *qm) |
5015 | { |
5016 | struct device *dev = &qm->pdev->dev; |
5017 | bool is_db_isolation; |
5018 | |
5019 | /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */ |
5020 | if (qm->fun_type == QM_HW_VF) { |
5021 | if (qm->ver != QM_HW_V1) |
5022 | /* v2 starts to support get vft by mailbox */ |
5023 | 			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); |
5024 | |
5025 | return 0; |
5026 | } |
5027 | |
5028 | is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); |
5029 | qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); |
5030 | qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, |
5031 | QM_FUNC_MAX_QP_CAP, is_db_isolation); |
5032 | |
5033 | if (qm->qp_num <= qm->max_qp_num) |
5034 | return 0; |
5035 | |
5036 | 	if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { |
5037 | 		/* Check whether the set qp number is valid */ |
5038 | 		dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", |
5039 | 			qm->qp_num, qm->max_qp_num); |
5040 | 		return -EINVAL; |
5041 | 	} |
5042 | |
5043 | 	dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", |
5044 | qm->qp_num, qm->max_qp_num); |
5045 | qm->qp_num = qm->max_qp_num; |
5046 | qm->debug.curr_qm_qp_num = qm->qp_num; |
5047 | |
5048 | return 0; |
5049 | } |
5050 | |
5051 | static int qm_pre_store_irq_type_caps(struct hisi_qm *qm) |
5052 | { |
5053 | struct hisi_qm_cap_record *qm_cap; |
5054 | struct pci_dev *pdev = qm->pdev; |
5055 | size_t i, size; |
5056 | |
5057 | size = ARRAY_SIZE(qm_pre_store_caps); |
5058 | 	qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); |
5059 | if (!qm_cap) |
5060 | return -ENOMEM; |
5061 | |
5062 | for (i = 0; i < size; i++) { |
5063 | qm_cap[i].type = qm_pre_store_caps[i]; |
5064 | qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info, |
5065 | qm_pre_store_caps[i], qm->cap_ver); |
5066 | } |
5067 | |
5068 | qm->cap_tables.qm_cap_table = qm_cap; |
5069 | |
5070 | return 0; |
5071 | } |
5072 | |
5073 | static int qm_get_hw_caps(struct hisi_qm *qm) |
5074 | { |
5075 | const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? |
5076 | qm_cap_info_pf : qm_cap_info_vf; |
5077 | u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : |
5078 | ARRAY_SIZE(qm_cap_info_vf); |
5079 | u32 val, i; |
5080 | |
5081 | 	/* The doorbell isolation register is an independent register. */ |
5082 | 	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); |
5083 | 	if (val) |
5084 | 		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); |
5085 | |
5086 | if (qm->ver >= QM_HW_V3) { |
5087 | 		val = readl(qm->io_base + QM_FUNC_CAPS_REG); |
5088 | qm->cap_ver = val & QM_CAPBILITY_VERSION; |
5089 | } |
5090 | |
5091 | 	/* Get PF/VF common capability */ |
5092 | 	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { |
5093 | 		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); |
5094 | 		if (val) |
5095 | 			set_bit(qm_cap_info_comm[i].type, &qm->caps); |
5096 | } |
5097 | |
5098 | 	/* Get PF/VF different capability */ |
5099 | 	for (i = 0; i < size; i++) { |
5100 | 		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); |
5101 | 		if (val) |
5102 | 			set_bit(cap_info[i].type, &qm->caps); |
5103 | } |
5104 | |
5105 | /* Fetch and save the value of irq type related capability registers */ |
5106 | return qm_pre_store_irq_type_caps(qm); |
5107 | } |
5108 | |
5109 | static int qm_get_pci_res(struct hisi_qm *qm) |
5110 | { |
5111 | struct pci_dev *pdev = qm->pdev; |
5112 | struct device *dev = &pdev->dev; |
5113 | int ret; |
5114 | |
5115 | 	ret = pci_request_mem_regions(pdev, qm->dev_name); |
5116 | 	if (ret < 0) { |
5117 | 		dev_err(dev, "Failed to request mem regions!\n"); |
5118 | return ret; |
5119 | } |
5120 | |
5121 | qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); |
5122 | 	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); |
5123 | if (!qm->io_base) { |
5124 | ret = -EIO; |
5125 | goto err_request_mem_regions; |
5126 | } |
5127 | |
5128 | ret = qm_get_hw_caps(qm); |
5129 | if (ret) |
5130 | goto err_ioremap; |
5131 | |
5132 | if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { |
5133 | qm->db_interval = QM_QP_DB_INTERVAL; |
5134 | qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); |
5135 | 		qm->db_io_base = ioremap(qm->db_phys_base, |
5136 | 					 pci_resource_len(pdev, PCI_BAR_4)); |
5137 | if (!qm->db_io_base) { |
5138 | ret = -EIO; |
5139 | goto err_ioremap; |
5140 | } |
5141 | } else { |
5142 | qm->db_phys_base = qm->phys_base; |
5143 | qm->db_io_base = qm->io_base; |
5144 | qm->db_interval = 0; |
5145 | } |
5146 | |
5147 | ret = qm_get_qp_num(qm); |
5148 | if (ret) |
5149 | goto err_db_ioremap; |
5150 | |
5151 | return 0; |
5152 | |
5153 | err_db_ioremap: |
5154 | if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) |
5155 | 		iounmap(qm->db_io_base); |
5156 | err_ioremap: |
5157 | 	iounmap(qm->io_base); |
5158 | err_request_mem_regions: |
5159 | pci_release_mem_regions(pdev); |
5160 | return ret; |
5161 | } |
5162 | |
5163 | static int hisi_qm_pci_init(struct hisi_qm *qm) |
5164 | { |
5165 | struct pci_dev *pdev = qm->pdev; |
5166 | struct device *dev = &pdev->dev; |
5167 | unsigned int num_vec; |
5168 | int ret; |
5169 | |
5170 | 	ret = pci_enable_device_mem(pdev); |
5171 | 	if (ret < 0) { |
5172 | 		dev_err(dev, "Failed to enable device mem!\n"); |
5173 | return ret; |
5174 | } |
5175 | |
5176 | ret = qm_get_pci_res(qm); |
5177 | if (ret) |
5178 | goto err_disable_pcidev; |
5179 | |
5180 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
5181 | if (ret < 0) |
5182 | goto err_get_pci_res; |
5183 | 	pci_set_master(pdev); |
5184 | |
5185 | num_vec = qm_get_irq_num(qm); |
5186 | 	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); |
5187 | 	if (ret < 0) { |
5188 | 		dev_err(dev, "Failed to enable MSI vectors!\n"); |
5189 | goto err_get_pci_res; |
5190 | } |
5191 | |
5192 | return 0; |
5193 | |
5194 | err_get_pci_res: |
5195 | qm_put_pci_res(qm); |
5196 | err_disable_pcidev: |
5197 | 	pci_disable_device(pdev); |
5198 | return ret; |
5199 | } |
5200 | |
5201 | static int hisi_qm_init_work(struct hisi_qm *qm) |
5202 | { |
5203 | int i; |
5204 | |
5205 | for (i = 0; i < qm->qp_num; i++) |
5206 | INIT_WORK(&qm->poll_data[i].work, qm_work_process); |
5207 | |
5208 | if (qm->fun_type == QM_HW_PF) |
5209 | INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); |
5210 | |
5211 | if (qm->ver > QM_HW_V2) |
5212 | INIT_WORK(&qm->cmd_process, qm_cmd_process); |
5213 | |
5214 | 	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | |
5215 | 				 WQ_UNBOUND, num_online_cpus(), |
5216 | 				 pci_name(qm->pdev)); |
5217 | 	if (!qm->wq) { |
5218 | 		pci_err(qm->pdev, "failed to alloc workqueue!\n"); |
5219 | return -ENOMEM; |
5220 | } |
5221 | |
5222 | return 0; |
5223 | } |
5224 | |
5225 | static int hisi_qp_alloc_memory(struct hisi_qm *qm) |
5226 | { |
5227 | struct device *dev = &qm->pdev->dev; |
5228 | u16 sq_depth, cq_depth; |
5229 | size_t qp_dma_size; |
5230 | int i, ret; |
5231 | |
5232 | 	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); |
5233 | 	if (!qm->qp_array) |
5234 | 		return -ENOMEM; |
5235 | |
5236 | 	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); |
5237 | 	if (!qm->poll_data) { |
5238 | 		kfree(qm->qp_array); |
5239 | 		return -ENOMEM; |
5240 | 	} |
5241 | |
5242 | 	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); |
5243 | |
5244 | /* one more page for device or qp statuses */ |
5245 | qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; |
5246 | qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; |
5247 | for (i = 0; i < qm->qp_num; i++) { |
5248 | qm->poll_data[i].qm = qm; |
5249 | 		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); |
5250 | 		if (ret) |
5251 | 			goto err_init_qp_mem; |
5252 | |
5253 | 		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size); |
5254 | } |
5255 | |
5256 | return 0; |
5257 | err_init_qp_mem: |
5258 | 	hisi_qp_memory_uninit(qm, i); |
5259 | |
5260 | return ret; |
5261 | } |
5262 | |
5263 | static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) |
5264 | { |
5265 | struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; |
5266 | struct qm_dma *xqc_dma = &xqc_buf->qcdma; |
5267 | struct device *dev = &qm->pdev->dev; |
5268 | size_t off = 0; |
5269 | |
5270 | #define QM_XQC_BUF_INIT(xqc_buf, type) do { \ |
5271 | (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ |
5272 | (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ |
5273 | off += QMC_ALIGN(sizeof(struct qm_##type)); \ |
5274 | } while (0) |
5275 | |
5276 | xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + |
5277 | QMC_ALIGN(sizeof(struct qm_aeqc)) + |
5278 | QMC_ALIGN(sizeof(struct qm_sqc)) + |
5279 | QMC_ALIGN(sizeof(struct qm_cqc)); |
	xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
					 &xqc_dma->dma, GFP_KERNEL);
5282 | if (!xqc_dma->va) |
5283 | return -ENOMEM; |
5284 | |
5285 | QM_XQC_BUF_INIT(xqc_buf, eqc); |
5286 | QM_XQC_BUF_INIT(xqc_buf, aeqc); |
5287 | QM_XQC_BUF_INIT(xqc_buf, sqc); |
5288 | QM_XQC_BUF_INIT(xqc_buf, cqc); |
5289 | |
5290 | return 0; |
5291 | } |
5292 | |
5293 | static int hisi_qm_memory_init(struct hisi_qm *qm) |
5294 | { |
5295 | struct device *dev = &qm->pdev->dev; |
5296 | int ret, total_func; |
5297 | size_t off = 0; |
5298 | |
5299 | if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { |
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
5302 | if (!qm->factor) |
5303 | return -ENOMEM; |
5304 | |
5305 | /* Only the PF value needs to be initialized */ |
5306 | qm->factor[0].func_qos = QM_QOS_MAX_VAL; |
5307 | } |
5308 | |
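/*
 * Lay out each ring (EQE/AEQE entries, SQC/CQC tables) inside the single
 * qm->qdma allocation; the CPU and DMA views share one running offset.
 */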
5309 | #define QM_INIT_BUF(qm, type, num) do { \ |
5310 | (qm)->type = ((qm)->qdma.va + (off)); \ |
5311 | (qm)->type##_dma = (qm)->qdma.dma + (off); \ |
5312 | off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ |
5313 | } while (0) |
5314 | |
	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
5317 | qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + |
5318 | QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + |
5319 | QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + |
5320 | QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); |
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
5324 | if (!qm->qdma.va) { |
5325 | ret = -ENOMEM; |
5326 | goto err_destroy_idr; |
5327 | } |
5328 | |
5329 | QM_INIT_BUF(qm, eqe, qm->eq_depth); |
5330 | QM_INIT_BUF(qm, aeqe, qm->aeq_depth); |
5331 | QM_INIT_BUF(qm, sqc, qm->qp_num); |
5332 | QM_INIT_BUF(qm, cqc, qm->qp_num); |
5333 | |
5334 | ret = hisi_qm_alloc_rsv_buf(qm); |
5335 | if (ret) |
5336 | goto err_free_qdma; |
5337 | |
5338 | ret = hisi_qp_alloc_memory(qm); |
5339 | if (ret) |
5340 | goto err_free_reserve_buf; |
5341 | |
5342 | return 0; |
5343 | |
5344 | err_free_reserve_buf: |
5345 | hisi_qm_free_rsv_buf(qm); |
5346 | err_free_qdma: |
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
5348 | err_destroy_idr: |
5349 | idr_destroy(&qm->qp_idr); |
5350 | if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) |
		kfree(qm->factor);
5352 | |
5353 | return ret; |
5354 | } |
5355 | |
5356 | /** |
5357 | * hisi_qm_init() - Initialize configures about qm. |
5358 | * @qm: The qm needing init. |
5359 | * |
5360 | * This function init qm, then we can call hisi_qm_start to put qm into work. |
5361 | */ |
5362 | int hisi_qm_init(struct hisi_qm *qm) |
5363 | { |
5364 | struct pci_dev *pdev = qm->pdev; |
5365 | struct device *dev = &pdev->dev; |
5366 | int ret; |
5367 | |
5368 | hisi_qm_pre_init(qm); |
5369 | |
5370 | ret = hisi_qm_pci_init(qm); |
5371 | if (ret) |
5372 | return ret; |
5373 | |
5374 | ret = qm_irqs_register(qm); |
5375 | if (ret) |
5376 | goto err_pci_init; |
5377 | |
5378 | if (qm->fun_type == QM_HW_PF) { |
		/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
		writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5381 | qm_disable_clock_gate(qm); |
5382 | ret = qm_dev_mem_reset(qm); |
5383 | if (ret) { |
5384 | dev_err(dev, "failed to reset device memory\n" ); |
5385 | goto err_irq_register; |
5386 | } |
5387 | } |
5388 | |
5389 | if (qm->mode == UACCE_MODE_SVA) { |
5390 | ret = qm_alloc_uacce(qm); |
5391 | if (ret < 0) |
5392 | dev_warn(dev, "fail to alloc uacce (%d)\n" , ret); |
5393 | } |
5394 | |
5395 | ret = hisi_qm_memory_init(qm); |
5396 | if (ret) |
5397 | goto err_alloc_uacce; |
5398 | |
5399 | ret = hisi_qm_init_work(qm); |
5400 | if (ret) |
5401 | goto err_free_qm_memory; |
5402 | |
5403 | qm_cmd_init(qm); |
5404 | |
5405 | return 0; |
5406 | |
5407 | err_free_qm_memory: |
5408 | hisi_qm_memory_uninit(qm); |
5409 | err_alloc_uacce: |
5410 | qm_remove_uacce(qm); |
5411 | err_irq_register: |
5412 | qm_irqs_unregister(qm); |
5413 | err_pci_init: |
5414 | hisi_qm_pci_uninit(qm); |
5415 | return ret; |
5416 | } |
5417 | EXPORT_SYMBOL_GPL(hisi_qm_init); |
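
/*
 * Illustrative sketch only (not part of this file): a minimal probe flow
 * for an accelerator driver layered on the QM. example_probe() and the
 * chosen field values are assumptions for the example, not requirements.
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		struct hisi_qm *qm;
 *		int ret;
 *
 *		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
 *		if (!qm)
 *			return -ENOMEM;
 *
 *		qm->pdev = pdev;
 *		qm->fun_type = QM_HW_PF;
 *
 *		ret = hisi_qm_init(qm);
 *		if (ret)
 *			return ret;
 *
 *		ret = hisi_qm_start(qm);
 *		if (ret)
 *			hisi_qm_uninit(qm);
 *
 *		return ret;
 *	}
 */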
5418 | |
5419 | /** |
5420 | * hisi_qm_get_dfx_access() - Try to get dfx access. |
5421 | * @qm: pointer to accelerator device. |
5422 | * |
 * Try to get dfx access, so the user can read debug messages.
 *
 * If the device is suspended, return failure; otherwise
 * bump up the runtime PM usage counter.
5427 | */ |
5428 | int hisi_qm_get_dfx_access(struct hisi_qm *qm) |
5429 | { |
5430 | struct device *dev = &qm->pdev->dev; |
5431 | |
5432 | if (pm_runtime_suspended(dev)) { |
5433 | dev_info(dev, "can not read/write - device in suspended.\n" ); |
5434 | return -EAGAIN; |
5435 | } |
5436 | |
5437 | return qm_pm_get_sync(qm); |
5438 | } |
5439 | EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access); |
5440 | |
5441 | /** |
5442 | * hisi_qm_put_dfx_access() - Put dfx access. |
5443 | * @qm: pointer to accelerator device. |
5444 | * |
 * Put dfx access; drop the runtime PM usage counter.
5446 | */ |
5447 | void hisi_qm_put_dfx_access(struct hisi_qm *qm) |
5448 | { |
5449 | qm_pm_put_sync(qm); |
5450 | } |
5451 | EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access); |
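
/*
 * Illustrative sketch only: the intended pairing of the two helpers above
 * around a debugfs read. example_qp_num_show() is an assumed name.
 *
 *	static int example_qp_num_show(struct seq_file *s, void *unused)
 *	{
 *		struct hisi_qm *qm = s->private;
 *		int ret;
 *
 *		ret = hisi_qm_get_dfx_access(qm);
 *		if (ret)
 *			return ret;
 *
 *		seq_printf(s, "qp_num: %u\n", qm->qp_num);
 *		hisi_qm_put_dfx_access(qm);
 *
 *		return 0;
 *	}
 */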
5452 | |
5453 | /** |
5454 | * hisi_qm_pm_init() - Initialize qm runtime PM. |
5455 | * @qm: pointer to accelerator device. |
5456 | * |
 * Function that initializes qm runtime PM.
5458 | */ |
5459 | void hisi_qm_pm_init(struct hisi_qm *qm) |
5460 | { |
5461 | struct device *dev = &qm->pdev->dev; |
5462 | |
5463 | if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) |
5464 | return; |
5465 | |
5466 | pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); |
5467 | pm_runtime_use_autosuspend(dev); |
5468 | pm_runtime_put_noidle(dev); |
5469 | } |
5470 | EXPORT_SYMBOL_GPL(hisi_qm_pm_init); |
5471 | |
5472 | /** |
5473 | * hisi_qm_pm_uninit() - Uninitialize qm runtime PM. |
5474 | * @qm: pointer to accelerator device. |
5475 | * |
 * Function that uninitializes qm runtime PM.
5477 | */ |
5478 | void hisi_qm_pm_uninit(struct hisi_qm *qm) |
5479 | { |
5480 | struct device *dev = &qm->pdev->dev; |
5481 | |
5482 | if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) |
5483 | return; |
5484 | |
5485 | pm_runtime_get_noresume(dev); |
5486 | pm_runtime_dont_use_autosuspend(dev); |
5487 | } |
5488 | EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit); |
5489 | |
5490 | static int qm_prepare_for_suspend(struct hisi_qm *qm) |
5491 | { |
5492 | struct pci_dev *pdev = qm->pdev; |
5493 | int ret; |
5494 | u32 val; |
5495 | |
5496 | ret = qm->ops->set_msi(qm, false); |
5497 | if (ret) { |
5498 | pci_err(pdev, "failed to disable MSI before suspending!\n" ); |
5499 | return ret; |
5500 | } |
5501 | |
	/* Shut down the out-of-order (OOO) register. */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5505 | |
5506 | ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, |
5507 | val, |
5508 | (val == ACC_MASTER_TRANS_RETURN_RW), |
5509 | POLL_PERIOD, POLL_TIMEOUT); |
5510 | if (ret) { |
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
5512 | return ret; |
5513 | } |
5514 | |
	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");
5518 | |
5519 | return ret; |
5520 | } |
5521 | |
5522 | static int qm_rebuild_for_resume(struct hisi_qm *qm) |
5523 | { |
5524 | struct pci_dev *pdev = qm->pdev; |
5525 | int ret; |
5526 | |
	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
5530 | return ret; |
5531 | } |
5532 | |
5533 | ret = qm->ops->set_msi(qm, true); |
5534 | if (ret) { |
5535 | pci_err(pdev, "failed to enable MSI after resuming!\n" ); |
5536 | return ret; |
5537 | } |
5538 | |
5539 | ret = qm_dev_hw_init(qm); |
5540 | if (ret) { |
5541 | pci_err(pdev, "failed to init device after resuming\n" ); |
5542 | return ret; |
5543 | } |
5544 | |
5545 | qm_cmd_init(qm); |
5546 | hisi_qm_dev_err_init(qm); |
	/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
	writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5549 | qm_disable_clock_gate(qm); |
5550 | ret = qm_dev_mem_reset(qm); |
5551 | if (ret) |
5552 | pci_err(pdev, "failed to reset device memory\n" ); |
5553 | |
5554 | return ret; |
5555 | } |
5556 | |
5557 | /** |
5558 | * hisi_qm_suspend() - Runtime suspend of given device. |
5559 | * @dev: device to suspend. |
5560 | * |
 * Function that suspends the device.
5562 | */ |
5563 | int hisi_qm_suspend(struct device *dev) |
5564 | { |
5565 | struct pci_dev *pdev = to_pci_dev(dev); |
5566 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
5567 | int ret; |
5568 | |
5569 | pci_info(pdev, "entering suspended state\n" ); |
5570 | |
5571 | ret = hisi_qm_stop(qm, QM_NORMAL); |
5572 | if (ret) { |
5573 | pci_err(pdev, "failed to stop qm(%d)\n" , ret); |
5574 | return ret; |
5575 | } |
5576 | |
5577 | ret = qm_prepare_for_suspend(qm); |
5578 | if (ret) |
5579 | pci_err(pdev, "failed to prepare suspended(%d)\n" , ret); |
5580 | |
5581 | return ret; |
5582 | } |
5583 | EXPORT_SYMBOL_GPL(hisi_qm_suspend); |
5584 | |
5585 | /** |
5586 | * hisi_qm_resume() - Runtime resume of given device. |
5587 | * @dev: device to resume. |
5588 | * |
 * Function that resumes the device.
5590 | */ |
5591 | int hisi_qm_resume(struct device *dev) |
5592 | { |
5593 | struct pci_dev *pdev = to_pci_dev(dev); |
5594 | struct hisi_qm *qm = pci_get_drvdata(pdev); |
5595 | int ret; |
5596 | |
5597 | pci_info(pdev, "resuming from suspend state\n" ); |
5598 | |
5599 | ret = qm_rebuild_for_resume(qm); |
5600 | if (ret) { |
5601 | pci_err(pdev, "failed to rebuild resume(%d)\n" , ret); |
5602 | return ret; |
5603 | } |
5604 | |
5605 | ret = hisi_qm_start(qm); |
5606 | if (ret) { |
5607 | if (qm_check_dev_error(qm)) { |
5608 | pci_info(pdev, "failed to start qm due to device error, device will be reset!\n" ); |
5609 | return 0; |
5610 | } |
5611 | |
5612 | pci_err(pdev, "failed to start qm(%d)!\n" , ret); |
5613 | } |
5614 | |
5615 | return ret; |
5616 | } |
5617 | EXPORT_SYMBOL_GPL(hisi_qm_resume); |
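
/*
 * Illustrative sketch only: drivers built on the QM typically wire these
 * callbacks into their runtime PM ops; example_pm_ops is an assumed name.
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */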
5618 | |
5619 | MODULE_LICENSE("GPL v2" ); |
5620 | MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>" ); |
5621 | MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver" ); |
5622 | |