1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
2 | |
3 | /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ |
4 | /* Copyright (c) 2008-2019, IBM Corporation */ |
5 | |
6 | #ifndef _IWARP_H |
7 | #define _IWARP_H |
8 | |
9 | #include <rdma/rdma_user_cm.h> /* RDMA_MAX_PRIVATE_DATA */ |
10 | #include <linux/types.h> |
11 | #include <asm/byteorder.h> |
12 | |
13 | #define RDMAP_VERSION 1 |
14 | #define DDP_VERSION 1 |
15 | #define MPA_REVISION_1 1 |
16 | #define MPA_REVISION_2 2 |
17 | #define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA |
18 | #define MPA_KEY_REQ "MPA ID Req Frame" |
19 | #define MPA_KEY_REP "MPA ID Rep Frame" |
20 | #define MPA_IRD_ORD_MASK 0x3fff |
21 | |
/*
 * Parameter word of the MPA request/reply frame.
 * @bits:   flag bits plus MPA revision, see MPA_RR_* constants below
 * @pd_len: length of trailing private data (at most MPA_MAX_PRIVDATA)
 */
struct mpa_rr_params {
	__be16 bits;
	__be16 pd_len;
};
26 | |
27 | /* |
28 | * MPA request/response header bits & fields |
29 | */ |
enum {
	MPA_RR_FLAG_MARKERS = cpu_to_be16(0x8000),	/* 'M': markers in use */
	MPA_RR_FLAG_CRC = cpu_to_be16(0x4000),		/* 'C': CRC enabled */
	MPA_RR_FLAG_REJECT = cpu_to_be16(0x2000),	/* 'R': connection rejected */
	MPA_RR_FLAG_ENHANCED = cpu_to_be16(0x1000),	/* enhanced connection setup (MPA v2) */
	MPA_RR_FLAG_GSO_EXP = cpu_to_be16(0x0800),	/* NOTE(review): non-standard bit, presumably GSO experiment — confirm */
	MPA_RR_MASK_REVISION = cpu_to_be16(0x00ff)	/* MPA revision, see __mpa_rr_revision() */
};
38 | |
39 | /* |
40 | * MPA request/reply header |
41 | */ |
struct mpa_rr {
	__u8 key[16];			/* MPA_KEY_REQ or MPA_KEY_REP (16 chars, no NUL) */
	struct mpa_rr_params params;	/* flag bits + private data length */
};
46 | |
47 | static inline void __mpa_rr_set_revision(__be16 *bits, u8 rev) |
48 | { |
49 | *bits = (*bits & ~MPA_RR_MASK_REVISION) | |
50 | (cpu_to_be16(rev) & MPA_RR_MASK_REVISION); |
51 | } |
52 | |
53 | static inline u8 __mpa_rr_revision(__be16 mpa_rr_bits) |
54 | { |
55 | __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION; |
56 | |
57 | return be16_to_cpu(rev); |
58 | } |
59 | |
/*
 * Control bits carried in the high bits of the MPA v2 IRD/ORD words.
 * NOTE(review): 0x8000 and 0x4000 are deliberately reused — which
 * meaning applies depends on whether the word signals peer-to-peer
 * setup or the RTR message type; confirm against the enhanced MPA spec.
 */
enum mpa_v2_ctrl {
	MPA_V2_PEER_TO_PEER = cpu_to_be16(0x8000),
	MPA_V2_ZERO_LENGTH_RTR = cpu_to_be16(0x4000),
	MPA_V2_RDMA_WRITE_RTR = cpu_to_be16(0x8000),
	MPA_V2_RDMA_READ_RTR = cpu_to_be16(0x4000),
	MPA_V2_RDMA_NO_RTR = cpu_to_be16(0x0000),
	MPA_V2_MASK_IRD_ORD = cpu_to_be16(0x3fff)	/* 14-bit IRD/ORD value */
};
68 | |
/*
 * Extra data exchanged for enhanced (MPA v2) connection setup,
 * presumably appended when MPA_RR_FLAG_ENHANCED is set — confirm.
 * Each word holds a 14-bit depth (MPA_V2_MASK_IRD_ORD) plus
 * MPA_V2_* control bits.
 */
struct mpa_v2_data {
	__be16 ird;	/* inbound RDMA read depth + control bits */
	__be16 ord;	/* outbound RDMA read depth + control bits */
};
73 | |
/* MPA stream marker; fpdu_hmd points back at the FPDU header (MPA FPDUPTR). */
struct mpa_marker {
	__be16 rsvd;
	__be16 fpdu_hmd; /* FPDU header-marker distance (= MPA's FPDUPTR) */
};
78 | |
79 | /* |
80 | * maximum MPA trailer |
81 | */ |
struct mpa_trailer {
	__u8 pad[4];	/* up to 4 pad bytes; actual count depends on FPDU length */
	__be32 crc;	/* MPA CRC — presumably CRC32c, confirm against transmit path */
};
86 | |
87 | #define MPA_HDR_SIZE 2 |
88 | #define MPA_CRC_SIZE 4 |
89 | |
90 | /* |
91 | * Common portion of iWARP headers (MPA, DDP, RDMAP) |
92 | * for any FPDU |
93 | */ |
struct iwarp_ctrl {
	__be16 mpa_len;		/* MPA frame length field */
	__be16 ddp_rdmap_ctrl;	/* combined DDP + RDMAP control bits, see masks below */
};
98 | |
99 | /* |
100 | * DDP/RDMAP Hdr bits & fields |
101 | */ |
enum {
	DDP_FLAG_TAGGED = cpu_to_be16(0x8000),		/* 'T': tagged DDP segment */
	DDP_FLAG_LAST = cpu_to_be16(0x4000),		/* 'L': last segment of message */
	DDP_MASK_RESERVED = cpu_to_be16(0x3C00),
	DDP_MASK_VERSION = cpu_to_be16(0x0300),		/* DDP version, bits 9:8 */
	RDMAP_MASK_VERSION = cpu_to_be16(0x00C0),	/* RDMAP version, bits 7:6 */
	RDMAP_MASK_RESERVED = cpu_to_be16(0x0030),
	RDMAP_MASK_OPCODE = cpu_to_be16(0x000f)		/* enum rdma_opcode */
};
111 | |
112 | static inline u8 __ddp_get_version(struct iwarp_ctrl *ctrl) |
113 | { |
114 | return be16_to_cpu(ctrl->ddp_rdmap_ctrl & DDP_MASK_VERSION) >> 8; |
115 | } |
116 | |
117 | static inline u8 __rdmap_get_version(struct iwarp_ctrl *ctrl) |
118 | { |
119 | __be16 ver = ctrl->ddp_rdmap_ctrl & RDMAP_MASK_VERSION; |
120 | |
121 | return be16_to_cpu(ver) >> 6; |
122 | } |
123 | |
124 | static inline u8 __rdmap_get_opcode(struct iwarp_ctrl *ctrl) |
125 | { |
126 | return be16_to_cpu(ctrl->ddp_rdmap_ctrl & RDMAP_MASK_OPCODE); |
127 | } |
128 | |
129 | static inline void __rdmap_set_opcode(struct iwarp_ctrl *ctrl, u8 opcode) |
130 | { |
131 | ctrl->ddp_rdmap_ctrl = (ctrl->ddp_rdmap_ctrl & ~RDMAP_MASK_OPCODE) | |
132 | (cpu_to_be16(opcode) & RDMAP_MASK_OPCODE); |
133 | } |
134 | |
/* Header of an RDMA Write FPDU: tagged DDP segment addressing sink STag/TO. */
struct iwarp_rdma_write {
	struct iwarp_ctrl ctrl;
	__be32 sink_stag;
	__be64 sink_to;
};
140 | |
/*
 * Header of an RDMA Read Request FPDU: untagged DDP header followed by
 * the read request payload (where to place the data, how much, and from
 * which source STag/TO to read).
 */
struct iwarp_rdma_rreq {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
	__be32 sink_stag;
	__be64 sink_to;
	__be32 read_size;
	__be32 source_stag;
	__be64 source_to;
};
153 | |
/* Header of an RDMA Read Response FPDU: tagged segment to the sink buffer. */
struct iwarp_rdma_rresp {
	struct iwarp_ctrl ctrl;
	__be32 sink_stag;
	__be64 sink_to;
};
159 | |
/* Header of a Send FPDU: untagged DDP segment (queue number, MSN, offset). */
struct iwarp_send {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
};
167 | |
/*
 * Header of a Send-with-Invalidate FPDU; same layout as iwarp_send but
 * the reserved word carries the STag to invalidate at the peer.
 */
struct iwarp_send_inv {
	struct iwarp_ctrl ctrl;
	__be32 inval_stag;
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
};
175 | |
/*
 * Header of a Terminate FPDU: untagged DDP header followed by the
 * terminate control word. The control word is declared twice so that
 * the host bit-field order matches the big-endian on-wire layout given
 * by TERM_MASK_*/TERM_FLAG_* on either endianness.
 */
struct iwarp_terminate {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;
	__be32 ddp_msn;
	__be32 ddp_mo;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__be32 layer : 4;	/* enum term_elayer: layer reporting the error */
	__be32 etype : 4;	/* error type within that layer */
	__be32 ecode : 8;	/* error code within that type */
	__be32 flag_m : 1;	/* terminated DDP header length field valid */
	__be32 flag_d : 1;	/* terminated DDP header included */
	__be32 flag_r : 1;	/* terminated RDMAP header included */
	__be32 reserved : 13;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__be32 reserved : 13;
	__be32 flag_r : 1;
	__be32 flag_d : 1;
	__be32 flag_m : 1;
	__be32 ecode : 8;
	__be32 etype : 4;
	__be32 layer : 4;
#else
#error "undefined byte order"
#endif
};
202 | |
203 | /* |
204 | * Terminate Hdr bits & fields |
205 | */ |
enum {
	TERM_MASK_LAYER = cpu_to_be32(0xf0000000),	/* maps to iwarp_terminate.layer */
	TERM_MASK_ETYPE = cpu_to_be32(0x0f000000),	/* maps to iwarp_terminate.etype */
	TERM_MASK_ECODE = cpu_to_be32(0x00ff0000),	/* maps to iwarp_terminate.ecode */
	TERM_FLAG_M = cpu_to_be32(0x00008000),		/* maps to iwarp_terminate.flag_m */
	TERM_FLAG_D = cpu_to_be32(0x00004000),		/* maps to iwarp_terminate.flag_d */
	TERM_FLAG_R = cpu_to_be32(0x00002000),		/* maps to iwarp_terminate.flag_r */
	TERM_MASK_RESVD = cpu_to_be32(0x00001fff)
};
215 | |
/* Return the error layer (enum term_elayer) of a Terminate header. */
static inline u8 __rdmap_term_layer(struct iwarp_terminate *term)
{
	return term->layer;
}
220 | |
/* Set the error layer of a Terminate header; truncated to the 4-bit field. */
static inline void __rdmap_term_set_layer(struct iwarp_terminate *term,
					  u8 layer)
{
	term->layer = layer & 0xf;
}
226 | |
/* Return the error type field of a Terminate header. */
static inline u8 __rdmap_term_etype(struct iwarp_terminate *term)
{
	return term->etype;
}
231 | |
/* Set the error type of a Terminate header; truncated to the 4-bit field. */
static inline void __rdmap_term_set_etype(struct iwarp_terminate *term,
					  u8 etype)
{
	term->etype = etype & 0xf;
}
237 | |
/* Return the error code field of a Terminate header. */
static inline u8 __rdmap_term_ecode(struct iwarp_terminate *term)
{
	return term->ecode;
}
242 | |
/* Set the 8-bit error code of a Terminate header. */
static inline void __rdmap_term_set_ecode(struct iwarp_terminate *term,
					  u8 ecode)
{
	term->ecode = ecode;
}
248 | |
249 | /* |
250 | * Common portion of iWARP headers (MPA, DDP, RDMAP) |
251 | * for an FPDU carrying an untagged DDP segment |
252 | */ |
struct iwarp_ctrl_untagged {
	struct iwarp_ctrl ctrl;
	__be32 rsvd;
	__be32 ddp_qn;	/* DDP queue number, see enum rdmap_untagged_qn */
	__be32 ddp_msn;	/* message sequence number on that queue */
	__be32 ddp_mo;	/* message offset of this segment */
};
260 | |
261 | /* |
262 | * Common portion of iWARP headers (MPA, DDP, RDMAP) |
263 | * for an FPDU carrying a tagged DDP segment |
264 | */ |
struct iwarp_ctrl_tagged {
	struct iwarp_ctrl ctrl;
	__be32 ddp_stag;	/* steering tag of the target buffer */
	__be64 ddp_to;		/* tagged offset within that buffer */
};
270 | |
/*
 * View of any received iWARP header. All variants start with the common
 * iwarp_ctrl word, so 'ctrl' can be inspected first to pick the right
 * interpretation of the rest.
 */
union iwarp_hdr {
	struct iwarp_ctrl ctrl;
	struct iwarp_ctrl_untagged c_untagged;
	struct iwarp_ctrl_tagged c_tagged;
	struct iwarp_rdma_write rwrite;
	struct iwarp_rdma_rreq rreq;
	struct iwarp_rdma_rresp rresp;
	struct iwarp_terminate terminate;
	struct iwarp_send send;
	struct iwarp_send_inv send_inv;
};
282 | |
/* Protocol layer reporting a Terminate error (iwarp_terminate.layer). */
enum term_elayer {
	TERM_ERROR_LAYER_RDMAP = 0x00,
	TERM_ERROR_LAYER_DDP = 0x01,
	TERM_ERROR_LAYER_LLP = 0x02 /* eg., MPA */
};
288 | |
/* DDP error types (iwarp_terminate.etype when layer is DDP). */
enum ddp_etype {
	DDP_ETYPE_CATASTROPHIC = 0x0,
	DDP_ETYPE_TAGGED_BUF = 0x1,
	DDP_ETYPE_UNTAGGED_BUF = 0x2,
	DDP_ETYPE_RSVD = 0x3
};
295 | |
/*
 * DDP error codes. Tagged (T_) and untagged (UT_) code spaces overlap
 * numerically; the accompanying ddp_etype disambiguates them.
 */
enum ddp_ecode {
	/* unspecified, set to zero */
	DDP_ECODE_CATASTROPHIC = 0x00,
	/* Tagged Buffer Errors */
	DDP_ECODE_T_INVALID_STAG = 0x00,
	DDP_ECODE_T_BASE_BOUNDS = 0x01,
	DDP_ECODE_T_STAG_NOT_ASSOC = 0x02,
	DDP_ECODE_T_TO_WRAP = 0x03,
	DDP_ECODE_T_VERSION = 0x04,
	/* Untagged Buffer Errors */
	DDP_ECODE_UT_INVALID_QN = 0x01,
	DDP_ECODE_UT_INVALID_MSN_NOBUF = 0x02,
	DDP_ECODE_UT_INVALID_MSN_RANGE = 0x03,
	DDP_ECODE_UT_INVALID_MO = 0x04,
	DDP_ECODE_UT_MSG_TOOLONG = 0x05,
	DDP_ECODE_UT_VERSION = 0x06
};
313 | |
/* Untagged DDP queue numbers used by RDMAP (iwarp_ctrl_untagged.ddp_qn). */
enum rdmap_untagged_qn {
	RDMAP_UNTAGGED_QN_SEND = 0,
	RDMAP_UNTAGGED_QN_RDMA_READ = 1,
	RDMAP_UNTAGGED_QN_TERMINATE = 2,
	RDMAP_UNTAGGED_QN_COUNT = 3	/* number of queues, not a wire value */
};
320 | |
/* RDMAP error types (iwarp_terminate.etype when layer is RDMAP). */
enum rdmap_etype {
	RDMAP_ETYPE_CATASTROPHIC = 0x0,
	RDMAP_ETYPE_REMOTE_PROTECTION = 0x1,
	RDMAP_ETYPE_REMOTE_OPERATION = 0x2
};
326 | |
/* RDMAP error codes (iwarp_terminate.ecode when layer is RDMAP). */
enum rdmap_ecode {
	RDMAP_ECODE_INVALID_STAG = 0x00,
	RDMAP_ECODE_BASE_BOUNDS = 0x01,
	RDMAP_ECODE_ACCESS_RIGHTS = 0x02,
	RDMAP_ECODE_STAG_NOT_ASSOC = 0x03,
	RDMAP_ECODE_TO_WRAP = 0x04,
	RDMAP_ECODE_VERSION = 0x05,
	RDMAP_ECODE_OPCODE = 0x06,
	RDMAP_ECODE_CATASTROPHIC_STREAM = 0x07,
	RDMAP_ECODE_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_ECODE_CANNOT_INVALIDATE = 0x09,
	RDMAP_ECODE_UNSPECIFIED = 0xff
};
340 | |
/* LLP (MPA) error codes (iwarp_terminate.ecode when layer is LLP). */
enum llp_ecode {
	LLP_ECODE_TCP_STREAM_LOST = 0x01, /* How to transfer this ?? */
	LLP_ECODE_RECEIVED_CRC = 0x02,
	LLP_ECODE_FPDU_START = 0x03,
	LLP_ECODE_INVALID_REQ_RESP = 0x04,

	/* Errors for Enhanced Connection Establishment only */
	LLP_ECODE_LOCAL_CATASTROPHIC = 0x05,
	LLP_ECODE_INSUFFICIENT_IRD = 0x06,
	LLP_ECODE_NO_MATCHING_RTR = 0x07
};
352 | |
/* Only defined LLP error type: MPA. */
enum llp_etype { LLP_ETYPE_MPA = 0x00 };
354 | |
/* RDMAP opcodes carried in the control word (see RDMAP_MASK_OPCODE). */
enum rdma_opcode {
	RDMAP_RDMA_WRITE = 0x0,
	RDMAP_RDMA_READ_REQ = 0x1,
	RDMAP_RDMA_READ_RESP = 0x2,
	RDMAP_SEND = 0x3,
	RDMAP_SEND_INVAL = 0x4,
	RDMAP_SEND_SE = 0x5,		/* send with solicited event */
	RDMAP_SEND_SE_INVAL = 0x6,	/* solicited event + invalidate */
	RDMAP_TERMINATE = 0x7,
	/* first out-of-range opcode; not a wire value */
	RDMAP_NOT_SUPPORTED = RDMAP_TERMINATE + 1
};
366 | |
367 | #endif |
368 | |