// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
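/**
 * mana_ib_create_cq - Create a completion queue
 * @ibcq: Completion queue to create
 * @attr: Creation attributes (CQE count and completion vector)
 * @udata: User-space buffer holding a struct mana_ib_create_cq
 *
 * Validates the requested completion vector and CQE count, pins the
 * user-space CQ buffer, and creates a zero-offset DMA region for it.
 * The hardware CQ ID is not assigned here; it is generated later, at
 * QP creation.
 *
 * Return: 0 on success, or a negative errno on failure.
 */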
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (udata->inlen < sizeof(ucmd))
		return -EINVAL;

	if (attr->comp_vector > gc->max_num_queues)
		return -EINVAL;

	cq->comp_vector = attr->comp_vector;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

	cq->cqe = attr->cqe;
	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
			       IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->umem)) {
		err = PTR_ERR(cq->umem);
		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
			  err);
		return err;
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem,
						    &cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to create dma region for create cq, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(ibdev,
		  "create_dma_region ret %d gdma_region 0x%llx\n",
		  err, cq->gdma_region);

	/*
	 * The CQ ID is not known at this time. The ID is generated at create_qp
	 */
	cq->id = INVALID_QUEUE_ID;

	return 0;

err_release_umem:
	ib_umem_release(cq->umem);
	return err;
}
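/**
 * mana_ib_destroy_cq - Destroy a completion queue
 * @ibcq: Completion queue to destroy
 * @udata: User-space data (unused)
 *
 * Destroys the DMA region backing the CQ, drops the CQ's entry from the
 * GDMA context's CQ table if a hardware ID was assigned, and releases
 * the pinned user memory.
 *
 * Return: 0 on success, or a negative errno on failure.
 */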
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to destroy dma region, %d\n", err);
		return err;
	}

	if (cq->id != INVALID_QUEUE_ID) {
		kfree(gc->cq_table[cq->id]);
		gc->cq_table[cq->id] = NULL;
	}

	ib_umem_release(cq->umem);

	return 0;
}
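/* Completion callback: notify the consumer, if it registered a handler. */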
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
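/**
 * mana_ib_install_cq_cb - Register a CQ in the GDMA context's CQ table
 * @mdev: mana RDMA device
 * @cq: Completion queue to register
 *
 * Allocates a gdma_queue entry for the CQ so that completion events for
 * cq->id are dispatched to mana_ib_cq_handler(). Must be called after a
 * hardware CQ ID has been assigned.
 *
 * Return: 0 on success, -ENOMEM if the table entry cannot be allocated.
 */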
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue *gdma_cq;

	/* Create CQ table entry */
	WARN_ON(gc->cq_table[cq->id]);
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->id;
	gc->cq_table[cq->id] = gdma_cq;
	return 0;
}