// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

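/* Write the base address, depth and head/tail pointers of a command queue
 * ring (CSQ or CRQ) into the matching hardware registers. For the CSQ, the
 * reset-ready bit in the depth register is preserved.
 */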
static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

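/* Reinitialize the flags of an already used descriptor so it can be sent
 * again, keeping only the requested read/write direction.
 */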
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

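/* Default capabilities for devices whose firmware does not report
 * capability bits (device version V2).
 */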
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf) {
		set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

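/* Zero a descriptor and fill in its opcode and default flags (no interrupt,
 * driver-to-firmware direction, plus the WR flag for read commands).
 */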
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

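/* Report to the firmware which optional compatibility features the driver
 * supports; with en == false all of them are turned off.
 */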
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}

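/* Free the DMA-coherent descriptor array of a ring, if allocated. */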
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

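/* Allocate the DMA-coherent descriptor array of a command queue ring. */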
static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

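/* Build the API capability bits the driver advertises to the firmware in
 * the query-version command.
 */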
static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

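/* Maps of firmware (IMP) capability bits to local HNAE3 capability bits,
 * one table for the PF and one for the VF.
 */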
static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
	{HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
	{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
	{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
	{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
	{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
	{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};

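/* Convert the little-endian capability words returned by the firmware into
 * a local bitmap.
 */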
static void
hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
{
	const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
	u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
	unsigned int i;

	for (i = 0; i < words; i++)
		val[i] = __le32_to_cpu(caps[i]);

	bitmap_from_arr32(bitmap, val,
			  HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
}

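/* Translate the capability bits reported by the firmware into the
 * ae_dev->caps bitmap, using the PF or VF mapping table.
 */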
static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
		is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
			   ARRAY_SIZE(hclge_vf_cmd_caps);
	DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
	u32 i;

	hclge_comm_capability_to_bitmap(caps, cmd->caps);
	for (i = 0; i < size; i++)
		if (test_bit(caps_map[i].imp_bit, caps))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}

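/* Set the ring type of the CSQ or CRQ and allocate its descriptor memory. */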
int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

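/* Query the firmware version and device capabilities, and derive the
 * device version from the hardware version and the PCI revision.
 */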
int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_set_default_capability(ae_dev, is_pf);
		return 0;
	}

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* these commands span several descriptors, and use the first one
	 * to store the opcode and return value
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

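/* Return the number of free descriptors in the ring; one slot is kept
 * unused to distinguish a full ring from an empty one.
 */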
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

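/* Copy the prefilled descriptors into the CSQ, wrapping next_to_use around
 * the end of the ring as needed.
 */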
static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

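/* Check that the head pointer reported by hardware lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */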
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

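/* Reclaim the CSQ descriptors that hardware has consumed. Returns the
 * number of cleaned descriptors, or -EIO if the hardware head pointer is
 * invalid, in which case further commands are disabled.
 */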
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

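/* True if hardware has processed all descriptors submitted so far. */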
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

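/* Some commands need a longer completion timeout than the default;
 * return the per-opcode override if one exists.
 */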
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
	static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
	};
	u32 i;

	for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
		if (cmdq_tx_timeout_map[i].opcode == opcode)
			return cmdq_tx_timeout_map[i].tx_timeout;

	return tx_timeout;
}

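/* Busy-wait in 1 us steps until the firmware completes the command or the
 * opcode-specific timeout expires.
 */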
static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
				     bool *is_completed)
{
	u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
							hw->cmq.tx_timeout);
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < cmdq_tx_timeout);
}

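/* Convert a firmware command return code into a standard errno value. */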
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

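/* Copy the descriptors written back by the firmware to the caller and
 * convert the return value stored in them into an errno.
 */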
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

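/* Wait for a synchronous command to complete, fetch its result and clean
 * the used descriptors from the CSQ.
 */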
static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
					 &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it
 * sends the queue, cleans the queue, etc.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
		 * so the SW HEAD pointer csq->next_to_clean must be updated.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

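/* Clear all CSQ/CRQ registers so the hardware stops using the rings. */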
static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

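/* Shut the command queue down: disable further commands, let in-flight
 * commands drain, clear the queue registers and free both rings.
 */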
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* wait to ensure that the firmware completes any possibly
	 * leftover commands.
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

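/* One-time setup of the command queue: locks, ring sizes, default timeout
 * and descriptor memory for both the CSQ and the CRQ.
 */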
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Setup the number of entries used by the command queues */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;

	/* Setup queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

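/* (Re)initialize the command queue hardware state and query the firmware
 * version and capabilities; used both at init time and when recovering
 * from a reset.
 */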
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because a higher-level
	 * reset may happen while a lower-level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);
	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}