1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Huawei HiNIC PCI Express Linux driver
4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/pci.h>
11#include <linux/device.h>
12#include <linux/slab.h>
13#include <linux/vmalloc.h>
14#include <linux/spinlock.h>
15#include <linux/sizes.h>
16#include <linux/atomic.h>
17#include <linux/log2.h>
18#include <linux/io.h>
19#include <linux/completion.h>
20#include <linux/err.h>
21#include <asm/byteorder.h>
22#include <asm/barrier.h>
23
24#include "hinic_common.h"
25#include "hinic_hw_if.h"
26#include "hinic_hw_eqs.h"
27#include "hinic_hw_mgmt.h"
28#include "hinic_hw_wqe.h"
29#include "hinic_hw_wq.h"
30#include "hinic_hw_cmdq.h"
31#include "hinic_hw_io.h"
32#include "hinic_hw_dev.h"
33
/* CEQ element data: position/mask of the cmdq type field */
#define CMDQ_CEQE_TYPE_SHIFT 0

#define CMDQ_CEQE_TYPE_MASK 0x7

/* Extract a named field from CEQ element data */
#define CMDQ_CEQE_GET(val, member) \
 (((val) >> CMDQ_CEQE_##member##_SHIFT) \
 & CMDQ_CEQE_##member##_MASK)

/* WQE status word: position/mask of the error-code value field */
#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20

#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF

/* Extract a named field from a WQE status word */
#define CMDQ_WQE_ERRCODE_GET(val, member) \
 (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
 & CMDQ_WQE_ERRCODE_##member##_MASK)

/* Doorbell offset derived from the low byte of the producer index */
#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))

/* The header is the first section of every cmdq WQE */
#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))

/* HW toggles the busy bit in ctrl_info when a WQE completes */
#define CMDQ_WQE_COMPLETED(ctrl_info) \
 HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

/* Size of the WQE prefix that must be written last (see cmdq_wqe_fill()) */
#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)

/* Offset of the cmdq doorbell inside its doorbell area */
#define CMDQ_DB_OFF SZ_2K

#define CMDQ_WQEBB_SIZE 64
#define CMDQ_WQE_SIZE 64
#define CMDQ_DEPTH SZ_4K

#define CMDQ_WQ_PAGE_SIZE SZ_256K

/* long (SGE data) and short (direct data) command WQE sizes */
#define WQE_LCMD_SIZE 64
#define WQE_SCMD_SIZE 64

#define COMPLETE_LEN 3

/* Timeout for a synchronous cmdq command, in milliseconds
 * (used via msecs_to_jiffies() in cmdq_sync_cmd_direct_resp())
 */
#define CMDQ_TIMEOUT 1000

/* Page frame number of a DMA address for the given page size */
#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))

/* A cmdq lives in the cmdqs->cmdq[] array at index cmdq_type */
#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
 struct hinic_cmdqs, cmdq[0])

#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
 struct hinic_func_to_io, \
 cmdqs)

/* Format of the completion section of a WQE */
enum completion_format {
 COMPLETE_DIRECT = 0,
 COMPLETE_SGE = 1,
};

/* Format of the data section of a WQE */
enum data_format {
 DATA_SGE = 0,
 DATA_DIRECT = 1,
};

/* Buffer descriptor length codes, in 8-byte units */
enum bufdesc_len {
 BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
 BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
};

/* Control section length codes, in 8-byte units */
enum ctrl_sect_len {
 CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
 CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
};

enum cmdq_scmd_type {
 CMDQ_SET_ARM_CMD = 2,
};

enum cmdq_cmd_type {
 CMDQ_CMD_SYNC_DIRECT_RESP = 0,
 CMDQ_CMD_SYNC_SGE_RESP = 1,
};

/* Whether a completion event should be generated on a CEQ */
enum completion_request {
 NO_CEQ = 0,
 CEQ_SET = 1,
};
118
119/**
120 * hinic_alloc_cmdq_buf - alloc buffer for sending command
121 * @cmdqs: the cmdqs
122 * @cmdq_buf: the buffer returned in this struct
123 *
124 * Return 0 - Success, negative - Failure
125 **/
126int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
127 struct hinic_cmdq_buf *cmdq_buf)
128{
129 struct hinic_hwif *hwif = cmdqs->hwif;
130 struct pci_dev *pdev = hwif->pdev;
131
132 cmdq_buf->buf = dma_pool_alloc(pool: cmdqs->cmdq_buf_pool, GFP_KERNEL,
133 handle: &cmdq_buf->dma_addr);
134 if (!cmdq_buf->buf) {
135 dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
136 return -ENOMEM;
137 }
138
139 return 0;
140}
141
/**
 * hinic_free_cmdq_buf - free buffer
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer to free that is in this struct
 **/
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	/* Return the buffer to the pool it was allocated from in
	 * hinic_alloc_cmdq_buf()
	 */
	dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}
152
153static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
154{
155 unsigned int wqe_size = 0;
156
157 switch (len) {
158 case BUFDESC_LCMD_LEN:
159 wqe_size = WQE_LCMD_SIZE;
160 break;
161 case BUFDESC_SCMD_LEN:
162 wqe_size = WQE_SCMD_SIZE;
163 break;
164 }
165
166 return wqe_size;
167}
168
/* Point the WQE's SGE completion section at the caller-supplied response
 * buffer, so HW can DMA the response into it.
 */
static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
				    struct hinic_cmdq_buf *buf_out)
{
	struct hinic_sge_resp *sge_resp = &completion->sge_resp;

	hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
}
176
/**
 * cmdq_prepare_wqe_ctrl - fill the ctrl, header and saved_data sections of
 * a cmdq WQE
 * @wqe: the WQE to prepare
 * @wrapped: wrap bit value HW expects for this pass over the queue
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command opcode
 * @prod_idx: producer index of the WQE
 * @complete_format: direct or SGE completion section format
 * @data_format: direct or SGE data section format (selects lcmd/scmd layout)
 * @buf_len: buffer descriptor length code for the header
 **/
static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
				  enum hinic_cmd_ack_type ack_type,
				  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
				  enum completion_format complete_format,
				  enum data_format data_format,
				  enum bufdesc_len buf_len)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	enum ctrl_sect_len ctrl_len;
	struct hinic_ctrl *ctrl;
	u32 saved_data;

	/* lcmd (SGE data) and scmd (direct data) WQEs keep their status and
	 * ctrl sections at different offsets, with different ctrl lengths
	 */
	if (data_format == DATA_SGE) {
		wqe_lcmd = &wqe->wqe_lcmd;

		wqe_lcmd->status.status_info = 0;
		ctrl = &wqe_lcmd->ctrl;
		ctrl_len = CTRL_SECT_LEN;
	} else {
		wqe_scmd = &wqe->direct_wqe.wqe_scmd;

		wqe_scmd->status.status_info = 0;
		ctrl = &wqe_scmd->ctrl;
		ctrl_len = CTRL_DIRECT_SECT_LEN;
	}

	ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
			  HINIC_CMDQ_CTRL_SET(cmd, CMD) |
			  HINIC_CMDQ_CTRL_SET(mod, MOD) |
			  HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);

	/* Completion events are always requested (CEQ_SET) */
	CMDQ_WQE_HEADER(wqe)->header_info =
		HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
		HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
		HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
		HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
		HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
		HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
		HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);

	/* Record in saved_data whether this WQE is a set-arm command, so
	 * the CEQ handler can distinguish it from regular sync commands
	 */
	saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
	saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);

	if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
		CMDQ_WQE_HEADER(wqe)->saved_data |=
						HINIC_SAVED_DATA_SET(1, ARM);
	else
		CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
}
227
/* Point the lcmd WQE's buffer descriptor SGE at the command input buffer */
static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
				  struct hinic_cmdq_buf *buf_in)
{
	hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}
233
/* Copy the command data inline into a direct (scmd) WQE.
 * NOTE(review): assumes in_size fits in wqe_scmd->buf_desc.data — callers
 * must pass a bounded size; verify against the scmd buffer descriptor layout.
 */
static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
				     void *buf_in, u32 in_size)
{
	struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;

	wqe_scmd->buf_desc.buf_len = in_size;
	memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
}
242
/**
 * cmdq_set_lcmd_wqe - build a complete long-command (SGE data) WQE
 * @wqe: the WQE to build
 * @cmd_type: selects direct or SGE response handling
 * @buf_in: DMA buffer holding the command data
 * @buf_out: DMA buffer for the response (only used for SGE response)
 * @wrapped: wrap bit value for this pass over the queue
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command opcode
 * @prod_idx: producer index of the WQE
 **/
static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic_cmdq_buf *buf_in,
			      struct hinic_cmdq_buf *buf_out, int wrapped,
			      enum hinic_cmd_ack_type ack_type,
			      enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
	enum completion_format complete_format;

	/* The switch covers every cmdq_cmd_type value, so complete_format
	 * is always assigned before use
	 */
	switch (cmd_type) {
	case CMDQ_CMD_SYNC_SGE_RESP:
		complete_format = COMPLETE_SGE;
		cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
		break;
	case CMDQ_CMD_SYNC_DIRECT_RESP:
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
			      prod_idx, complete_format, DATA_SGE,
			      BUFDESC_LCMD_LEN);

	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}
270
/**
 * cmdq_set_direct_wqe - build a complete short-command (inline data) WQE
 * @wqe: the WQE to build
 * @cmd_type: selects direct or SGE response handling
 * @buf_in: command data to copy inline into the WQE
 * @in_size: size of the inline command data
 * @buf_out: DMA buffer for the response (only used for SGE response)
 * @wrapped: wrap bit value for this pass over the queue
 * @ack_type: ack type for the command
 * @mod: module on the card that will handle the command
 * @cmd: the command opcode
 * @prod_idx: producer index of the WQE
 **/
static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
				enum cmdq_cmd_type cmd_type,
				void *buf_in, u16 in_size,
				struct hinic_cmdq_buf *buf_out, int wrapped,
				enum hinic_cmd_ack_type ack_type,
				enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
	enum completion_format complete_format;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;

	wqe_scmd = &direct_wqe->wqe_scmd;

	/* The switch covers every cmdq_cmd_type value, so complete_format
	 * is always assigned before use
	 */
	switch (cmd_type) {
	case CMDQ_CMD_SYNC_SGE_RESP:
		complete_format = COMPLETE_SGE;
		cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
		break;
	case CMDQ_CMD_SYNC_DIRECT_RESP:
		complete_format = COMPLETE_DIRECT;
		wqe_scmd->completion.direct_resp = 0;
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
			      complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);

	cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
}
300
/* Copy a prepared WQE into the work queue. The first 8 bytes (the header,
 * which carries the wrap/busy information HW polls) must become visible
 * only after the rest of the WQE, hence the tail-first copy and the write
 * barrier in between.
 */
static void cmdq_wqe_fill(void *dst, void *src)
{
	memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

	wmb(); /* The first 8 bytes should be written last */

	*(u64 *)dst = *(u64 *)src;
}
310
/* Compose the doorbell word: the high byte of the producer index (the low
 * byte is encoded in the doorbell address, see CMDQ_DB_PI_OFF), the control
 * path, the cmdq type and the doorbell type.
 */
static void cmdq_fill_db(u32 *db_info,
			 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	*db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
		   HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
		   HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
		   HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
}
319
/* Ring the cmdq doorbell for the given producer index. The barrier ensures
 * the WQE contents written by cmdq_wqe_fill() are globally visible before
 * HW is notified.
 */
static void cmdq_set_db(struct hinic_cmdq *cmdq,
			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	u32 db_info;

	cmdq_fill_db(&db_info, cmdq_type, prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	db_info = cpu_to_be32(db_info);

	wmb(); /* write all before the doorbell */

	writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}
334
/**
 * cmdq_sync_cmd_direct_resp - post a long-command WQE and wait synchronously
 * for its direct response
 * @cmdq: the cmdq to post on
 * @mod: module on the card that will handle the command
 * @cmd: the command opcode
 * @buf_in: DMA buffer holding the command data
 * @resp: if not NULL, receives the 64-bit direct response from the WQE
 *
 * Return 0 - Success, -EBUSY if no WQE is available, -ETIMEDOUT on
 * completion timeout, -EFAULT if the card reported an error code
 **/
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
				     enum hinic_mod_type mod, u8 cmd,
				     struct hinic_cmdq_buf *buf_in,
				     u64 *resp)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	int errcode, wrapped, num_wqebbs;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	struct completion done;

	/* Keep doorbell index correct. bh - for tasklet(ceq). */
	spin_lock_bh(&cmdq->cmdq_lock);

	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
	hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock_bh(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	/* Snapshot the wrap bit for this WQE, then toggle it if the queue
	 * wraps around after consuming this entry
	 */
	wrapped = cmdq->wrapped;

	num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	/* Publish the error-code slot and the completion object that the
	 * CEQ handler (cmdq_sync_cmd_handler) will fill in / signal
	 */
	cmdq->errcode[curr_prod_idx] = &errcode;

	init_completion(&done);
	cmdq->done[curr_prod_idx] = &done;

	/* Build the WQE in a local copy first; it is written to the queue
	 * with the required ordering by cmdq_wqe_fill() below
	 */
	cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
			  wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
			  curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);

	/* CMDQ WQE is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock_bh(&cmdq->cmdq_lock);

	if (!wait_for_completion_timeout(&done,
					 msecs_to_jiffies(CMDQ_TIMEOUT))) {
		/* Timed out: retract our slots under the lock so a late
		 * completion cannot touch stack variables that are about
		 * to go out of scope
		 */
		spin_lock_bh(&cmdq->cmdq_lock);

		if (cmdq->errcode[curr_prod_idx] == &errcode)
			cmdq->errcode[curr_prod_idx] = NULL;

		if (cmdq->done[curr_prod_idx] == &done)
			cmdq->done[curr_prod_idx] = NULL;

		spin_unlock_bh(&cmdq->cmdq_lock);

		hinic_dump_ceq_info(cmdq->hwdev);
		return -ETIMEDOUT;
	}

	smp_rmb(); /* read error code after completion */

	if (resp) {
		struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;

		/* The WQE was converted to big endian before posting;
		 * NOTE(review): semantically this is a be64_to_cpu — on
		 * little-endian hosts both are the same byte swap, but
		 * verify behavior on big-endian hosts
		 */
		*resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
	}

	if (errcode != 0)
		return -EFAULT;

	return 0;
}
416
/**
 * cmdq_set_arm_bit - post a short-command WQE carrying a set-arm request,
 * without waiting for completion
 * @cmdq: the cmdq to post on
 * @buf_in: the arm-bit command data, copied inline into the WQE
 * @in_size: size of the command data
 *
 * Return 0 - Success, -EBUSY if no WQE is available
 *
 * Called from the CEQ path (see cmdq_ceq_handler -> hinic_set_arm_bit),
 * hence the plain spin_lock rather than the _bh variant used by the
 * process-context sender.
 **/
static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
			    u16 in_size)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	int wrapped, num_wqebbs;

	/* Keep doorbell index correct */
	spin_lock(&cmdq->cmdq_lock);

	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
	hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	/* Snapshot the wrap bit for this WQE, then toggle it if the queue
	 * wraps around after consuming this entry
	 */
	wrapped = cmdq->wrapped;

	num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
			    in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
			    HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);

	/* cmdq wqe is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock(&cmdq->cmdq_lock);
	return 0;
}
462
463static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
464{
465 if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
466 return -EINVAL;
467
468 return 0;
469}
470
471/**
472 * hinic_cmdq_direct_resp - send command with direct data as resp
473 * @cmdqs: the cmdqs
474 * @mod: module on the card that will handle the command
475 * @cmd: the command
476 * @buf_in: the buffer for the command
477 * @resp: the response to return
478 *
479 * Return 0 - Success, negative - Failure
480 **/
481int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
482 enum hinic_mod_type mod, u8 cmd,
483 struct hinic_cmdq_buf *buf_in, u64 *resp)
484{
485 struct hinic_hwif *hwif = cmdqs->hwif;
486 struct pci_dev *pdev = hwif->pdev;
487 int err;
488
489 err = cmdq_params_valid(buf_in);
490 if (err) {
491 dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
492 return err;
493 }
494
495 return cmdq_sync_cmd_direct_resp(cmdq: &cmdqs->cmdq[HINIC_CMDQ_SYNC],
496 mod, cmd, buf_in, resp);
497}
498
499/**
500 * hinic_set_arm_bit - set arm bit for enable interrupt again
501 * @cmdqs: the cmdqs
502 * @q_type: type of queue to set the arm bit for
503 * @q_id: the queue number
504 *
505 * Return 0 - Success, negative - Failure
506 **/
507static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
508 enum hinic_set_arm_qtype q_type, u32 q_id)
509{
510 struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
511 struct hinic_hwif *hwif = cmdqs->hwif;
512 struct pci_dev *pdev = hwif->pdev;
513 struct hinic_cmdq_arm_bit arm_bit;
514 int err;
515
516 arm_bit.q_type = q_type;
517 arm_bit.q_id = q_id;
518
519 err = cmdq_set_arm_bit(cmdq, buf_in: &arm_bit, in_size: sizeof(arm_bit));
520 if (err) {
521 dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
522 return err;
523 }
524
525 return 0;
526}
527
/* Clear the HW busy bit of a completed WQE so the slot can be detected as
 * free on the next pass. The ctrl section location depends on whether the
 * WQE was an lcmd or an scmd, which is recovered from the (big endian)
 * header's buffer descriptor length.
 */
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
				   struct hinic_cmdq_wqe *wqe)
{
	u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
	unsigned int bufdesc_len, wqe_size;
	struct hinic_ctrl *ctrl;

	bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
	wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
	if (wqe_size == WQE_LCMD_SIZE) {
		struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

		ctrl = &wqe_lcmd->ctrl;
	} else {
		struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
		struct hinic_cmdq_wqe_scmd *wqe_scmd;

		wqe_scmd = &direct_wqe->wqe_scmd;
		ctrl = &wqe_scmd->ctrl;
	}

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;

	wmb(); /* verify wqe is clear */
}
554
/**
 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
 * @cmdq: the cmdq of the arm command
 * @wqe: the wqe of the arm command
 *
 * Return 0 - Success, negative - Failure (-EBUSY if the WQE has not
 * actually completed yet)
 **/
static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
				struct hinic_cmdq_wqe *wqe)
{
	struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	struct hinic_ctrl *ctrl;
	u32 ctrl_info;

	/* Arm commands are always scmd (direct) WQEs */
	wqe_scmd = &direct_wqe->wqe_scmd;
	ctrl = &wqe_scmd->ctrl;
	ctrl_info = be32_to_cpu(ctrl->ctrl_info);

	/* HW should toggle the HW BUSY BIT */
	if (!CMDQ_WQE_COMPLETED(ctrl_info))
		return -EBUSY;

	clear_wqe_complete_bit(cmdq, wqe);

	/* Release the consumed scmd-sized slot back to the work queue */
	hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
	return 0;
}
583
584static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
585 int errcode)
586{
587 if (cmdq->errcode[prod_idx])
588 *cmdq->errcode[prod_idx] = errcode;
589}
590
/**
 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
 * @cmdq: the cmdq of the command
 * @cons_idx: the consumer index to update the error code for
 * @errcode: the error code
 **/
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
				  int errcode)
{
	/* The sender registered its slots under the producer index it was
	 * given, which equals the consumer index we see here
	 */
	u16 prod_idx = cons_idx;

	/* The lock pairs with the sender's timeout path, which retracts
	 * its errcode/done slots under the same lock
	 */
	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_errcode(cmdq, prod_idx, errcode);

	wmb(); /* write all before update for the command request */

	if (cmdq->done[prod_idx])
		complete(cmdq->done[prod_idx]);
	spin_unlock(&cmdq->cmdq_lock);
}
611
/**
 * cmdq_cmd_ceq_handler - handle completion of a regular (lcmd) sync command
 * @cmdq: the cmdq of the command
 * @ci: consumer index of the completed WQE
 * @cmdq_wqe: the completed WQE
 *
 * Return 0 - Success, -EBUSY if the WQE has not actually completed yet
 **/
static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
				struct hinic_cmdq_wqe *cmdq_wqe)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
	struct hinic_status *status = &wqe_lcmd->status;
	struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
	int errcode;

	if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
		return -EBUSY;

	/* Order the busy-bit check before reading the DMA'd status word */
	dma_rmb();

	errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);

	/* Wake the synchronous sender waiting on this WQE */
	cmdq_sync_cmd_handler(cmdq, ci, errcode);

	clear_wqe_complete_bit(cmdq, cmdq_wqe);
	hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
	return 0;
}
633
/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler(cmdqs)
 * @ceqe_data: ceq element data
 *
 * Drains completed WQEs from the cmdq identified by the CEQ element and,
 * if the last handled WQE was a regular command (not an arm command),
 * re-arms the cmdq so further completion events are generated.
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
	enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
	struct hinic_cmdq_header *header;
	struct hinic_hw_wqe *hw_wqe;
	int err, set_arm = 0;
	u32 saved_data;
	u16 ci;

	/* Read the smallest wqe size for getting wqe size */
	while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		/* saved_data tells whether the WQE was an arm command
		 * (tagged by cmdq_prepare_wqe_ctrl at post time)
		 */
		header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
		saved_data = be32_to_cpu(header->saved_data);

		if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
			/* arm_bit was set until here */
			set_arm = 0;

			if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
				break;
		} else {
			set_arm = 1;

			/* Regular commands are lcmd-sized; re-read the WQE
			 * at the larger size before handling it
			 */
			hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
			if (IS_ERR(hw_wqe))
				break;

			if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
				break;
		}
	}

	if (set_arm) {
		struct hinic_hwif *hwif = cmdqs->hwif;
		struct pci_dev *pdev = hwif->pdev;

		err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
		if (err)
			dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
	}
}
685
/**
 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
 * @cmdq_ctxt: cmdq ctxt to initialize
 * @cmdq: the cmdq
 * @cmdq_pages: the memory of the queue
 **/
static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
				 struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_pages *cmdq_pages)
{
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
	struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
	struct hinic_wq *wq = cmdq->wq;

	/* The data in the HW is in Big Endian Format */
	wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);

	pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);

	ctxt_info->curr_wqe_page_pfn =
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
		HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);

	/* For a single-page WQ, the block PFN field reuses the first-page
	 * pfn computed above; only multi-page WQs have a separate block
	 */
	if (wq->num_q_pages != 1) {
		/* block PFN - Read Modify Write */
		cmdq_first_block_paddr = cmdq_pages->page_paddr;

		pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
	}

	ctxt_info->wq_block_pfn =
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
		HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);

	cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
	cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
	cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
728
729/**
730 * init_cmdq - initialize cmdq
731 * @cmdq: the cmdq
732 * @wq: the wq attaced to the cmdq
733 * @q_type: the cmdq type of the cmdq
734 * @db_area: doorbell area for the cmdq
735 *
736 * Return 0 - Success, negative - Failure
737 **/
738static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
739 enum hinic_cmdq_type q_type, void __iomem *db_area)
740{
741 int err;
742
743 cmdq->wq = wq;
744 cmdq->cmdq_type = q_type;
745 cmdq->wrapped = 1;
746
747 spin_lock_init(&cmdq->cmdq_lock);
748
749 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
750 if (!cmdq->done)
751 return -ENOMEM;
752
753 cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
754 wq->q_depth));
755 if (!cmdq->errcode) {
756 err = -ENOMEM;
757 goto err_errcode;
758 }
759
760 cmdq->db_base = db_area + CMDQ_DB_OFF;
761 return 0;
762
763err_errcode:
764 vfree(addr: cmdq->done);
765 return err;
766}
767
/**
 * free_cmdq - Free cmdq
 * @cmdq: the cmdq to free
 **/
static void free_cmdq(struct hinic_cmdq *cmdq)
{
	/* Release the per-WQE slot arrays allocated in init_cmdq() */
	vfree(cmdq->errcode);
	vfree(cmdq->done);
}
777
/**
 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: db_area for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
			   struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	enum hinic_cmdq_type type, cmdq_type;
	struct hinic_cmdq_ctxt *cmdq_ctxts;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_pfhwdev *pfhwdev;
	int err;

	cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
				  sizeof(*cmdq_ctxts), GFP_KERNEL);
	if (!cmdq_ctxts)
		return -ENOMEM;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	/* Phase 1: initialize every cmdq and build its HW context locally */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		cmdqs->cmdq[cmdq_type].hwdev = hwdev;
		err = init_cmdq(&cmdqs->cmdq[cmdq_type],
				&cmdqs->saved_wqs[cmdq_type], cmdq_type,
				db_area[cmdq_type]);
		if (err) {
			dev_err(&pdev->dev, "Failed to initialize cmdq\n");
			goto err_init_cmdq;
		}

		cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
				     &cmdqs->cmdq[cmdq_type],
				     &cmdqs->cmdq_pages);
	}

	/* Phase 2: write the CMDQ ctxts to HW via the management channel */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					HINIC_COMM_CMD_CMDQ_CTXT_SET,
					&cmdq_ctxts[cmdq_type],
					sizeof(cmdq_ctxts[cmdq_type]),
					NULL, NULL, HINIC_MGMT_MSG_SYNC);
		if (err) {
			dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
				cmdq_type);
			goto err_write_cmdq_ctxt;
		}
	}

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return 0;

err_write_cmdq_ctxt:
	/* All cmdqs were initialized in phase 1; free every one of them */
	cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
	/* Free only the cmdqs that were successfully initialized
	 * (cmdq_type is the first one that failed, or the end marker)
	 */
	for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
		free_cmdq(&cmdqs->cmdq[type]);

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return err;
}
847
848static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
849{
850 struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
851 struct hinic_pfhwdev *pfhwdev;
852
853 pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
854
855 hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
856 hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
857
858 hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
859 hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
860
861 return hinic_msg_to_mgmt(pf_to_mgmt: &pfhwdev->pf_to_mgmt, mod: HINIC_MOD_COMM,
862 cmd: HINIC_COMM_CMD_HWCTXT_SET,
863 buf_in: &hw_ioctxt, in_size: sizeof(hw_ioctxt), NULL,
864 NULL, sync: HINIC_MGMT_MSG_SYNC);
865}
866
/**
 * hinic_init_cmdqs - init all cmdqs
 * @cmdqs: cmdqs to init
 * @hwif: HW interface for accessing cmdqs
 * @db_area: doorbell areas for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
		     void __iomem **db_area)
{
	struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_hwdev *hwdev;
	u16 max_wqe_size;
	int err;

	cmdqs->hwif = hwif;
	/* DMA pool backing the buffers handed out by hinic_alloc_cmdq_buf */
	cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
					       HINIC_CMDQ_BUF_SIZE,
					       HINIC_CMDQ_BUF_SIZE, 0);
	if (!cmdqs->cmdq_buf_pool)
		return -ENOMEM;

	cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
					sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
	if (!cmdqs->saved_wqs) {
		err = -ENOMEM;
		goto err_saved_wqs;
	}

	max_wqe_size = WQE_LCMD_SIZE;
	err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
				   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
				   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
		goto err_cmdq_wqs;
	}

	hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
	err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
		goto err_cmdq_ctxt;
	}

	/* From here on, CEQ events for the cmdq are dispatched to
	 * cmdq_ceq_handler
	 */
	hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
			      cmdq_ceq_handler);

	err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
	if (err) {
		dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
		goto err_set_cmdq_depth;
	}

	return 0;

err_set_cmdq_depth:
	hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
	/* NOTE(review): only the SYNC cmdq is freed here, while
	 * init_cmdqs_ctxt initialized all HINIC_MAX_CMDQ_TYPES — verify
	 * against the number of cmdq types actually in use
	 */
	free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
err_cmdq_ctxt:
	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
			    HINIC_MAX_CMDQ_TYPES);

err_cmdq_wqs:
	devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
	dma_pool_destroy(cmdqs->cmdq_buf_pool);
	return err;
}
939
940/**
941 * hinic_free_cmdqs - free all cmdqs
942 * @cmdqs: cmdqs to free
943 **/
944void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
945{
946 struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
947 struct hinic_hwif *hwif = cmdqs->hwif;
948 struct pci_dev *pdev = hwif->pdev;
949 enum hinic_cmdq_type cmdq_type;
950
951 hinic_ceq_unregister_cb(ceqs: &func_to_io->ceqs, event: HINIC_CEQ_CMDQ);
952
953 cmdq_type = HINIC_CMDQ_SYNC;
954 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
955 free_cmdq(cmdq: &cmdqs->cmdq[cmdq_type]);
956
957 hinic_wqs_cmdq_free(cmdq_pages: &cmdqs->cmdq_pages, wq: cmdqs->saved_wqs,
958 cmdq_blocks: HINIC_MAX_CMDQ_TYPES);
959
960 devm_kfree(dev: &pdev->dev, p: cmdqs->saved_wqs);
961
962 dma_pool_destroy(pool: cmdqs->cmdq_buf_pool);
963}
964

source code of linux/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c