// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

/*
 * The rdma_rxe driver supports type 1 and type 2B memory windows.
 * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by
 * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw()
 * but are bound by bind_mw work requests. The ibv_bind_mw() call is
 * converted by libibverbs to a bind_mw work request.
 */
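
/*
 * For reference, a minimal userspace sketch of a type 2 bind using the
 * standard libibverbs API. This is not part of this driver; error
 * handling is omitted, and pd, qp, mr, buf and len are assumed to
 * already exist (the MR must have been registered with
 * IBV_ACCESS_MW_BIND):
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
 *	struct ibv_send_wr wr = {}, *bad_wr;
 *
 *	wr.opcode = IBV_WR_BIND_MW;
 *	wr.send_flags = IBV_SEND_SIGNALED;
 *	wr.bind_mw.mw = mw;
 *	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
 *	wr.bind_mw.bind_info.mr = mr;
 *	wr.bind_mw.bind_info.addr = (uintptr_t)buf;
 *	wr.bind_mw.bind_info.length = len;
 *	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;
 *	ibv_post_send(qp, &wr, &bad_wr);
 *
 * The resulting bind_mw work request is what rxe_bind_mw() below
 * processes.
 */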

#include "rxe.h"

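/*
 * rxe_alloc_mw - allocate and initialize a memory window. The rkey is
 * built from the pool index in the upper 24 bits and an 8-bit key in
 * the low byte. A type 1 MW starts out valid; a type 2 MW starts out
 * free and becomes valid when it is bound.
 */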
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	rxe_finalize(mw);

	return 0;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);

	rxe_cleanup(mw);

	return 0;
}

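/*
 * rxe_check_bind_mw - validate a bind_mw work request against the IBA
 * rules; the o10-* and C10-* tags below refer to IBA specification
 * compliance statements. Called with the MW lock held. mr may be NULL
 * only when a type 1 MW is being unbound.
 */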
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr, int access)
{
	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((access & IB_ZERO_BASED))) {
			rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			rxe_dbg_mw(mw,
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			rxe_dbg_mw(mw,
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	/* the remaining checks only apply to a non-NULL MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		rxe_dbg_mw(mw,
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		rxe_dbg_mw(mw,
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
			rxe_dbg_mw(mw,
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->ibmr.iova + mr->ibmr.length)))) {
			rxe_dbg_mw(mw,
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

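/*
 * rxe_do_bind_mw - apply a validated bind_mw work request. Called with
 * the MW lock held. Replaces the low eight bits of the rkey with the
 * key from the WQE, drops any reference to a previously bound MR and,
 * for a nonzero length, takes a reference on the new MR. A type 2 MW
 * also takes a reference on the binding QP.
 */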
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr, int access)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	if (mw->mr) {
		rxe_put(mw->mr);
		atomic_dec(&mw->mr->num_mw);
		mw->mr = NULL;
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}

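/*
 * rxe_bind_mw - process a bind_mw work request from the send queue.
 * Looks up the MW and (for a nonzero length) the MR by key, checks
 * that the requested access is supported, and then validates and
 * performs the bind under the MW lock.
 */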
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
	int access = wqe->wr.wr.mw.access;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	if (access & ~RXE_ACCESS_SUPPORTED_MW) {
		rxe_err_mw(mw, "access %#x not supported\n", access);
		ret = -EOPNOTSUPP;
		goto err_drop_mr;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr, access);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);
err:
	return ret;
}

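/*
 * rxe_check_invalidate_mw - only a type 2 MW that is not already
 * invalid may be invalidated; type 1 MWs may never be invalidated
 * (o10-37.2.26).
 */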
static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

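/*
 * rxe_do_invalidate_mw - invalidate a bound type 2 MW. Called with the
 * MW lock held. Drops the QP and MR references taken at bind time and
 * returns the MW to the free state so it can be bound again.
 */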
static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_put(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

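/*
 * rxe_invalidate_mw - look up the MW matching rkey and, if the rkey
 * and MW state check out, invalidate it under the MW lock.
 */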
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);
err:
	return ret;
}

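/*
 * rxe_lookup_mw - look up the MW for an rkey and verify that it can
 * satisfy an access: the rkey must match, the MW must belong to the
 * same PD, be valid and bound (nonzero length), permit the requested
 * access and, for type 2, be bound to this QP. Returns the MW with a
 * reference held, which the caller must drop with rxe_put(), or NULL.
 */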
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) || ((access & mw->access) != access) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}

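/*
 * rxe_mw_cleanup - pool cleanup callback, run when the last reference
 * to the MW is dropped. Releases the PD reference taken at allocation
 * and any MR and QP references still held from a bind.
 */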
void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
	struct rxe_pd *pd = to_rpd(mw->ibmw.pd);

	rxe_put(pd);

	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}