// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"

#define CI_Q_ADDR_SIZE			sizeof(u32)

#define CI_ADDR(base_addr, q_id)	((base_addr) + \
					 (q_id) * CI_Q_ADDR_SIZE)

#define CI_TABLE_SIZE(num_qps)		((num_qps) * CI_Q_ADDR_SIZE)

#define DB_IDX(db, db_base)	\
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

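/* encode a WQ page size in bytes as log2 of its size in 4KB units */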
#define HINIC_PAGE_SIZE_HW(pg_size)	((u8)ilog2((u32)((pg_size) >> 12)))

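/* cmdq commands for modifying and cleaning queue contexts */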
enum io_cmd {
	IO_CMD_MODIFY_QUEUE_CTXT = 0,
	IO_CMD_CLEAN_QUEUE_CTXT,
};

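/**
 * init_db_area_idx - initialize the doorbell area free-index ring
 * @free_db_area: free doorbell area structure to initialize
 *
 * All HINIC_DB_MAX_AREAS doorbell page indexes start out free; alloc_pos
 * and return_pos are ring positions that are masked by
 * (HINIC_DB_MAX_AREAS - 1) on use.
 **/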
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
{
	int i;

	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
		free_db_area->db_idx[i] = i;

	free_db_area->alloc_pos = 0;
	free_db_area->return_pos = HINIC_DB_MAX_AREAS;

	free_db_area->num_free = HINIC_DB_MAX_AREAS;

	sema_init(&free_db_area->idx_lock, 1);
}

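/**
 * get_db_area - pop a free doorbell area from the free-index ring
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return pointer to the doorbell area on success, ERR_PTR(-ENOMEM) when
 * no doorbell area is free
 **/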
static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx;

	down(&free_db_area->idx_lock);

	free_db_area->num_free--;

	if (free_db_area->num_free < 0) {
		free_db_area->num_free++;
		up(&free_db_area->idx_lock);
		return ERR_PTR(-ENOMEM);
	}

	pos = free_db_area->alloc_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	idx = free_db_area->db_idx[pos];

	free_db_area->db_idx[pos] = -1;

	up(&free_db_area->idx_lock);

	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
}

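/**
 * return_db_area - return a doorbell area to the free-index ring
 * @func_to_io: func to io channel that holds the IO components
 * @db_base: pointer to the doorbell area to return
 **/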
static void return_db_area(struct hinic_func_to_io *func_to_io,
			   void __iomem *db_base)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx = DB_IDX(db_base, func_to_io->db_base);

	down(&free_db_area->idx_lock);

	pos = free_db_area->return_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	free_db_area->db_idx[pos] = idx;

	free_db_area->num_free++;

	up(&free_db_area->idx_lock);
}

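/**
 * write_sq_ctxts - write the SQ contexts to HW through the cmdq
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_sqs: number of SQs to write
 *
 * Return 0 - Success, negative - Failure
 **/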
static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_sqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	sq_ctxt_block = cmdq_buf.buf;
	sq_ctxt = sq_ctxt_block->sq_ctxt;

	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
				num_sqs, func_to_io->max_qps);
	for (i = 0; i < num_sqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if (err || out_param != 0) {
		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

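/**
 * write_rq_ctxts - write the RQ contexts to HW through the cmdq
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_rqs: number of RQs to write
 *
 * Return 0 - Success, negative - Failure
 **/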
static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_rqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	rq_ctxt_block = cmdq_buf.buf;
	rq_ctxt = rq_ctxt_block->rq_ctxt;

	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
				num_rqs, func_to_io->max_qps);
	for (i = 0; i < num_rqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if (err || out_param != 0) {
		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

/**
 * write_qp_ctxts - write the qp ctxt to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_qps: number of qps to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_qps)
{
	return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
		write_rq_ctxts(func_to_io, base_qpn, num_qps));
}

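/**
 * hinic_clean_queue_offload_ctxt - clean the TSO/LRO offload context space
 * @func_to_io: func to io channel that holds the IO components
 * @ctxt_type: type of the queue contexts to clean (SQ or RQ)
 *
 * Return 0 - Success, negative - Failure
 **/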
static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
					  enum hinic_qp_ctxt_type ctxt_type)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_clean_queue_ctxt *ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	u64 out_param = 0;
	int err;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	ctxt_block = cmdq_buf.buf;
	ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
	ctxt_block->cmdq_hdr.addr_offset = 0;

	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
	ctxt_block->ctxt_size = 0x3;

	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));

	cmdq_buf.size = sizeof(*ctxt_block);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_CLEAN_QUEUE_CTXT,
				     &cmdq_buf, &out_param);

	if (err || out_param) {
		dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
			err, out_param);

		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);

	return err;
}

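/**
 * hinic_clean_qp_offload_ctxt - clean the SQ and RQ offload context spaces
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return 0 - Success, nonzero - Failure
 **/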
static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
{
	/* clean LRO/TSO context space */
	return (hinic_clean_queue_offload_ctxt(func_to_io,
					       HINIC_QP_CTXT_TYPE_SQ) ||
		hinic_clean_queue_offload_ctxt(func_to_io,
					       HINIC_QP_CTXT_TYPE_RQ));
}

/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
		   struct hinic_qp *qp, int q_id,
		   struct msix_entry *sq_msix_entry,
		   struct msix_entry *rq_msix_entry)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void __iomem *db_base;
	int err;

	qp->q_id = q_id;

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
				func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
		return err;
	}

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
				func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
		goto err_rq_alloc;
	}

	db_base = get_db_area(func_to_io);
	if (IS_ERR(db_base)) {
		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
		err = PTR_ERR(db_base);
		goto err_get_db;
	}

	func_to_io->sq_db[q_id] = db_base;

	qp->sq.qid = q_id;
	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
			    sq_msix_entry,
			    CI_ADDR(func_to_io->ci_addr_base, q_id),
			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
	if (err) {
		dev_err(&pdev->dev, "Failed to init SQ\n");
		goto err_sq_init;
	}

	qp->rq.qid = q_id;
	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
			    rq_msix_entry);
	if (err) {
		dev_err(&pdev->dev, "Failed to init RQ\n");
		goto err_rq_init;
	}

	return 0;

err_rq_init:
	hinic_clean_sq(&qp->sq);

err_sq_init:
	return_db_area(func_to_io, db_base);

err_get_db:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
	return err;
}

/**
 * destroy_qp - Clean the resources of a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to clean
 **/
static void destroy_qp(struct hinic_func_to_io *func_to_io,
		       struct hinic_qp *qp)
{
	int q_id = qp->q_id;

	hinic_clean_rq(&qp->rq);
	hinic_clean_sq(&qp->sq);

	return_db_area(func_to_io, func_to_io->sq_db[q_id]);

	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
}

/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
			u16 base_qpn, int num_qps,
			struct msix_entry *sq_msix_entries,
			struct msix_entry *rq_msix_entries)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void *ci_addr_base;
	int i, j, err;

	func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
				       sizeof(*func_to_io->qps), GFP_KERNEL);
	if (!func_to_io->qps)
		return -ENOMEM;

	func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
					 sizeof(*func_to_io->sq_wq), GFP_KERNEL);
	if (!func_to_io->sq_wq) {
		err = -ENOMEM;
		goto err_sq_wq;
	}

	func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
					 sizeof(*func_to_io->rq_wq), GFP_KERNEL);
	if (!func_to_io->rq_wq) {
		err = -ENOMEM;
		goto err_rq_wq;
	}

	func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
					 sizeof(*func_to_io->sq_db), GFP_KERNEL);
	if (!func_to_io->sq_db) {
		err = -ENOMEM;
		goto err_sq_db;
	}

	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
					  &func_to_io->ci_dma_base,
					  GFP_KERNEL);
	if (!ci_addr_base) {
		dev_err(&pdev->dev, "Failed to allocate CI area\n");
		err = -ENOMEM;
		goto err_ci_base;
	}

	func_to_io->ci_addr_base = ci_addr_base;

	for (i = 0; i < num_qps; i++) {
		err = init_qp(func_to_io, &func_to_io->qps[i], i,
			      &sq_msix_entries[i], &rq_msix_entries[i]);
		if (err) {
			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
			goto err_init_qp;
		}
	}

	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
	if (err) {
		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
		goto err_write_qp_ctxts;
	}

	err = hinic_clean_qp_offload_ctxt(func_to_io);
	if (err) {
		dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
		goto err_write_qp_ctxts;
	}

	return 0;

err_write_qp_ctxts:
err_init_qp:
	for (j = 0; j < i; j++)
		destroy_qp(func_to_io, &func_to_io->qps[j]);

	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
	devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
	devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
	devm_kfree(&pdev->dev, func_to_io->qps);
	return err;
}

/**
 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @num_qps: number of queue pairs to destroy
 **/
void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t ci_table_size;
	int i;

	ci_table_size = CI_TABLE_SIZE(num_qps);

	for (i = 0; i < num_qps; i++)
		destroy_qp(func_to_io, &func_to_io->qps[i]);

	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
			  func_to_io->ci_dma_base);

	devm_kfree(&pdev->dev, func_to_io->sq_db);

	devm_kfree(&pdev->dev, func_to_io->rq_wq);
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

	devm_kfree(&pdev->dev, func_to_io->qps);
}

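/**
 * hinic_set_wq_page_size - set the WQ page size for a function in HW
 * @hwdev: the NIC HW device
 * @func_idx: function to set the WQ page size for
 * @page_size: the WQ page size, in bytes
 *
 * Return 0 - Success, negative - Failure
 **/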
int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
			   u32 page_size)
{
	struct hinic_wq_page_size page_size_info = {0};
	u16 out_size = sizeof(page_size_info);
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	page_size_info.func_idx = func_idx;
	page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);

	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
				sizeof(page_size_info), &page_size_info,
				&out_size, HINIC_MGMT_MSG_SYNC);
	if (err || !out_size || page_size_info.status) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x\n",
			err, page_size_info.status, out_size);
		return -EFAULT;
	}

	return 0;
}

/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number of completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
		  struct msix_entry *ceq_msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq, type;
	void __iomem *db_area;
	int err;

	func_to_io->hwif = hwif;
	func_to_io->qps = NULL;
	func_to_io->max_qps = max_qps;
	func_to_io->ceqs.hwdev = func_to_io->hwdev;

	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      ceq_msix_entries);
	if (err) {
		dev_err(&pdev->dev, "Failed to init CEQs\n");
		return err;
	}

	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
		goto err_wqs_alloc;
	}

	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
	if (!func_to_io->db_base) {
		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
		err = -ENOMEM;
		goto err_db_ioremap;
	}

	init_db_area_idx(&func_to_io->free_db_area);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
		db_area = get_db_area(func_to_io);
		if (IS_ERR(db_area)) {
			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
			err = PTR_ERR(db_area);
			goto err_db_area;
		}

		func_to_io->cmdq_db_area[cmdq] = db_area;
	}

	err = hinic_set_wq_page_size(func_to_io->hwdev,
				     HINIC_HWIF_FUNC_IDX(hwif),
				     HINIC_DEFAULT_WQ_PAGE_SIZE);
	if (err) {
		dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
		goto init_wq_pg_size_err;
	}

	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
			       func_to_io->cmdq_db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
		goto err_init_cmdqs;
	}

	return 0;

err_init_cmdqs:
	if (!HINIC_IS_VF(func_to_io->hwif))
		hinic_set_wq_page_size(func_to_io->hwdev,
				       HINIC_HWIF_FUNC_IDX(hwif),
				       HINIC_HW_WQ_PAGE_SIZE);
init_wq_pg_size_err:
err_db_area:
	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

	iounmap(func_to_io->db_base);

err_db_ioremap:
	hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
	hinic_ceqs_free(&func_to_io->ceqs);
	return err;
}

/**
 * hinic_io_free - Free the IO components
 * @func_to_io: func to io channel that holds the IO components
 **/
void hinic_io_free(struct hinic_func_to_io *func_to_io)
{
	enum hinic_cmdq_type cmdq;

	hinic_free_cmdqs(&func_to_io->cmdqs);

	if (!HINIC_IS_VF(func_to_io->hwif))
		hinic_set_wq_page_size(func_to_io->hwdev,
				       HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
				       HINIC_HW_WQ_PAGE_SIZE);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);

	iounmap(func_to_io->db_base);
	hinic_wqs_free(&func_to_io->wqs);
	hinic_ceqs_free(&func_to_io->ceqs);
}
