1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (C) 2017, Microsoft Corporation. |
4 | * |
5 | * Author(s): Long Li <longli@microsoft.com> |
6 | */ |
7 | #include <linux/module.h> |
8 | #include <linux/highmem.h> |
9 | #include "smbdirect.h" |
10 | #include "cifs_debug.h" |
11 | #include "cifsproto.h" |
12 | #include "smb2proto.h" |
13 | |
14 | static struct smbd_response *get_empty_queue_buffer( |
15 | struct smbd_connection *info); |
16 | static struct smbd_response *get_receive_buffer( |
17 | struct smbd_connection *info); |
18 | static void put_receive_buffer( |
19 | struct smbd_connection *info, |
20 | struct smbd_response *response); |
21 | static int allocate_receive_buffers(struct smbd_connection *info, int num_buf); |
22 | static void destroy_receive_buffers(struct smbd_connection *info); |
23 | |
24 | static void put_empty_packet( |
25 | struct smbd_connection *info, struct smbd_response *response); |
26 | static void enqueue_reassembly( |
27 | struct smbd_connection *info, |
28 | struct smbd_response *response, int data_length); |
29 | static struct smbd_response *_get_first_reassembly( |
30 | struct smbd_connection *info); |
31 | |
32 | static int smbd_post_recv( |
33 | struct smbd_connection *info, |
34 | struct smbd_response *response); |
35 | |
36 | static int smbd_post_send_empty(struct smbd_connection *info); |
37 | |
38 | static void destroy_mr_list(struct smbd_connection *info); |
39 | static int allocate_mr_list(struct smbd_connection *info); |
40 | |
struct smb_extract_to_rdma {
	struct ib_sge		*sge;		/* Output SGE array */
	unsigned int		nr_sge;		/* Number of SGEs filled in */
	unsigned int		max_sge;	/* Capacity of the SGE array */
	struct ib_device	*device;	/* Device to DMA-map through */
	u32			local_dma_lkey;	/* Local key for the mapping */
	enum dma_data_direction direction;
};
49 | static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len, |
50 | struct smb_extract_to_rdma *rdma); |
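/*
 * A sketch of how a caller is expected to set up the extraction
 * descriptor above; this mirrors the initializer actually used in
 * smbd_post_send_iter() below:
 *
 *	struct smb_extract_to_rdma extract = {
 *		.nr_sge		= 1,
 *		.max_sge	= SMBDIRECT_MAX_SEND_SGE,
 *		.sge		= request->sge,
 *		.device		= info->id->device,
 *		.local_dma_lkey	= info->pd->local_dma_lkey,
 *		.direction	= DMA_TO_DEVICE,
 *	};
 */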
51 | |
52 | /* SMBD version number */ |
53 | #define SMBD_V1 0x0100 |
54 | |
55 | /* Port numbers for SMBD transport */ |
56 | #define SMB_PORT 445 |
57 | #define SMBD_PORT 5445 |
58 | |
59 | /* Address lookup and resolve timeout in ms */ |
60 | #define RDMA_RESOLVE_TIMEOUT 5000 |
61 | |
62 | /* SMBD negotiation timeout in seconds */ |
63 | #define SMBD_NEGOTIATE_TIMEOUT 120 |
64 | |
/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
66 | #define SMBD_MIN_RECEIVE_SIZE 128 |
67 | #define SMBD_MIN_FRAGMENTED_SIZE 131072 |
68 | |
69 | /* |
 * Default maximum number of RDMA read/write outstanding on this connection
 * This value may be decreased during QP creation if the hardware limit is lower
72 | */ |
73 | #define SMBD_CM_RESPONDER_RESOURCES 32 |
74 | |
75 | /* Maximum number of retries on data transfer operations */ |
76 | #define SMBD_CM_RETRY 6 |
77 | /* No need to retry on Receiver Not Ready since SMBD manages credits */ |
78 | #define SMBD_CM_RNR_RETRY 0 |
79 | |
80 | /* |
81 | * User configurable initial values per SMBD transport connection |
82 | * as defined in [MS-SMBD] 3.1.1.1 |
 * These may change after SMBD negotiation
84 | */ |
85 | /* The local peer's maximum number of credits to grant to the peer */ |
86 | int smbd_receive_credit_max = 255; |
87 | |
/* The number of send credits requested of the remote peer */
89 | int smbd_send_credit_target = 255; |
90 | |
/* The maximum single-message size that can be sent to the remote peer */
92 | int smbd_max_send_size = 1364; |
93 | |
94 | /* The maximum fragmented upper-layer payload receive size supported */ |
95 | int smbd_max_fragmented_recv_size = 1024 * 1024; |
96 | |
97 | /* The maximum single-message size which can be received */ |
98 | int smbd_max_receive_size = 1364; |
99 | |
100 | /* The timeout to initiate send of a keepalive message on idle */ |
101 | int smbd_keep_alive_interval = 120; |
102 | |
103 | /* |
104 | * User configurable initial values for RDMA transport |
105 | * The actual values used may be lower and are limited to hardware capabilities |
106 | */ |
107 | /* Default maximum number of pages in a single RDMA write/read */ |
108 | int smbd_max_frmr_depth = 2048; |
109 | |
/* If the payload is smaller than this many bytes, use RDMA send/recv, not read/write */
111 | int rdma_readwrite_threshold = 4096; |
112 | |
/* Transport logging functions
 * Logging is organized into classes. They can be OR'ed to set the effective
 * logging level via the module parameter smbd_logging_class,
 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
 * log_rdma_event() messages
 */
119 | #define LOG_OUTGOING 0x1 |
120 | #define LOG_INCOMING 0x2 |
121 | #define LOG_READ 0x4 |
122 | #define LOG_WRITE 0x8 |
123 | #define LOG_RDMA_SEND 0x10 |
124 | #define LOG_RDMA_RECV 0x20 |
125 | #define LOG_KEEP_ALIVE 0x40 |
126 | #define LOG_RDMA_EVENT 0x80 |
127 | #define LOG_RDMA_MR 0x100 |
128 | static unsigned int smbd_logging_class; |
129 | module_param(smbd_logging_class, uint, 0644); |
130 | MODULE_PARM_DESC(smbd_logging_class, |
	"Logging class for SMBD transport 0x0 to 0x100");
132 | |
133 | #define ERR 0x0 |
134 | #define INFO 0x1 |
135 | static unsigned int smbd_logging_level = ERR; |
136 | module_param(smbd_logging_level, uint, 0644); |
137 | MODULE_PARM_DESC(smbd_logging_level, |
	"Logging level for SMBD transport, 0 (default): error, 1: info");
139 | |
140 | #define log_rdma(level, class, fmt, args...) \ |
141 | do { \ |
142 | if (level <= smbd_logging_level || class & smbd_logging_class) \ |
143 | cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\ |
144 | } while (0) |
145 | |
146 | #define log_outgoing(level, fmt, args...) \ |
147 | log_rdma(level, LOG_OUTGOING, fmt, ##args) |
148 | #define log_incoming(level, fmt, args...) \ |
149 | log_rdma(level, LOG_INCOMING, fmt, ##args) |
150 | #define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args) |
151 | #define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args) |
152 | #define log_rdma_send(level, fmt, args...) \ |
153 | log_rdma(level, LOG_RDMA_SEND, fmt, ##args) |
154 | #define log_rdma_recv(level, fmt, args...) \ |
155 | log_rdma(level, LOG_RDMA_RECV, fmt, ##args) |
156 | #define log_keep_alive(level, fmt, args...) \ |
157 | log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args) |
158 | #define log_rdma_event(level, fmt, args...) \ |
159 | log_rdma(level, LOG_RDMA_EVENT, fmt, ##args) |
160 | #define log_rdma_mr(level, fmt, args...) \ |
161 | log_rdma(level, LOG_RDMA_MR, fmt, ##args) |
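/*
 * Example: to log only memory registration and RDMA events at runtime,
 * OR the two classes together (0x100 | 0x80 = 0x180) and write the mask
 * to the module parameter (path assumes the cifs module):
 *
 *	echo 0x180 > /sys/module/cifs/parameters/smbd_logging_class
 */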
162 | |
163 | static void smbd_disconnect_rdma_work(struct work_struct *work) |
164 | { |
165 | struct smbd_connection *info = |
166 | container_of(work, struct smbd_connection, disconnect_work); |
167 | |
168 | if (info->transport_status == SMBD_CONNECTED) { |
169 | info->transport_status = SMBD_DISCONNECTING; |
		rdma_disconnect(info->id);
171 | } |
172 | } |
173 | |
174 | static void smbd_disconnect_rdma_connection(struct smbd_connection *info) |
175 | { |
	queue_work(info->workqueue, &info->disconnect_work);
177 | } |
178 | |
179 | /* Upcall from RDMA CM */ |
180 | static int smbd_conn_upcall( |
181 | struct rdma_cm_id *id, struct rdma_cm_event *event) |
182 | { |
183 | struct smbd_connection *info = id->context; |
184 | |
	log_rdma_event(INFO, "event=%d status=%d\n",
186 | event->event, event->status); |
187 | |
188 | switch (event->event) { |
189 | case RDMA_CM_EVENT_ADDR_RESOLVED: |
190 | case RDMA_CM_EVENT_ROUTE_RESOLVED: |
191 | info->ri_rc = 0; |
192 | complete(&info->ri_done); |
193 | break; |
194 | |
195 | case RDMA_CM_EVENT_ADDR_ERROR: |
196 | info->ri_rc = -EHOSTUNREACH; |
197 | complete(&info->ri_done); |
198 | break; |
199 | |
200 | case RDMA_CM_EVENT_ROUTE_ERROR: |
201 | info->ri_rc = -ENETUNREACH; |
202 | complete(&info->ri_done); |
203 | break; |
204 | |
205 | case RDMA_CM_EVENT_ESTABLISHED: |
		log_rdma_event(INFO, "connected event=%d\n", event->event);
207 | info->transport_status = SMBD_CONNECTED; |
208 | wake_up_interruptible(&info->conn_wait); |
209 | break; |
210 | |
211 | case RDMA_CM_EVENT_CONNECT_ERROR: |
212 | case RDMA_CM_EVENT_UNREACHABLE: |
213 | case RDMA_CM_EVENT_REJECTED: |
		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
215 | info->transport_status = SMBD_DISCONNECTED; |
216 | wake_up_interruptible(&info->conn_wait); |
217 | break; |
218 | |
219 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
220 | case RDMA_CM_EVENT_DISCONNECTED: |
		/* This happens when we fail the negotiation */
222 | if (info->transport_status == SMBD_NEGOTIATE_FAILED) { |
223 | info->transport_status = SMBD_DISCONNECTED; |
224 | wake_up(&info->conn_wait); |
225 | break; |
226 | } |
227 | |
228 | info->transport_status = SMBD_DISCONNECTED; |
229 | wake_up_interruptible(&info->disconn_wait); |
230 | wake_up_interruptible(&info->wait_reassembly_queue); |
231 | wake_up_interruptible_all(&info->wait_send_queue); |
232 | break; |
233 | |
234 | default: |
235 | break; |
236 | } |
237 | |
238 | return 0; |
239 | } |
240 | |
241 | /* Upcall from RDMA QP */ |
242 | static void |
243 | smbd_qp_async_error_upcall(struct ib_event *event, void *context) |
244 | { |
245 | struct smbd_connection *info = context; |
246 | |
	log_rdma_event(ERR, "%s on device %s info %p\n",
248 | ib_event_msg(event->event), event->device->name, info); |
249 | |
250 | switch (event->event) { |
251 | case IB_EVENT_CQ_ERR: |
252 | case IB_EVENT_QP_FATAL: |
253 | smbd_disconnect_rdma_connection(info); |
254 | break; |
255 | |
256 | default: |
257 | break; |
258 | } |
259 | } |
260 | |
261 | static inline void *smbd_request_payload(struct smbd_request *request) |
262 | { |
263 | return (void *)request->packet; |
264 | } |
265 | |
266 | static inline void *smbd_response_payload(struct smbd_response *response) |
267 | { |
268 | return (void *)response->packet; |
269 | } |
270 | |
271 | /* Called when a RDMA send is done */ |
272 | static void send_done(struct ib_cq *cq, struct ib_wc *wc) |
273 | { |
274 | int i; |
275 | struct smbd_request *request = |
276 | container_of(wc->wr_cqe, struct smbd_request, cqe); |
277 | |
	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
		request, wc->status);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(request->info);
	}

	for (i = 0; i < request->num_sge; i++)
		ib_dma_unmap_single(request->info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);

	if (atomic_dec_and_test(&request->info->send_pending))
		wake_up(&request->info->wait_send_pending);

	wake_up(&request->info->wait_post_send);

	mempool_free(request, request->info->request_mempool);
299 | } |
300 | |
301 | static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp) |
302 | { |
	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
304 | resp->min_version, resp->max_version, |
305 | resp->negotiated_version, resp->credits_requested, |
306 | resp->credits_granted, resp->status, |
307 | resp->max_readwrite_size, resp->preferred_send_size, |
308 | resp->max_receive_size, resp->max_fragmented_size); |
309 | } |
310 | |
311 | /* |
312 | * Process a negotiation response message, according to [MS-SMBD]3.1.5.7 |
313 | * response, packet_length: the negotiation response message |
314 | * return value: true if negotiation is a success, false if failed |
315 | */ |
316 | static bool process_negotiation_response( |
317 | struct smbd_response *response, int packet_length) |
318 | { |
319 | struct smbd_connection *info = response->info; |
320 | struct smbd_negotiate_resp *packet = smbd_response_payload(response); |
321 | |
322 | if (packet_length < sizeof(struct smbd_negotiate_resp)) { |
		log_rdma_event(ERR,
			"error: packet_length=%d\n", packet_length);
325 | return false; |
326 | } |
327 | |
328 | if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { |
		log_rdma_event(ERR, "error: negotiated_version=%x\n",
330 | le16_to_cpu(packet->negotiated_version)); |
331 | return false; |
332 | } |
333 | info->protocol = le16_to_cpu(packet->negotiated_version); |
334 | |
335 | if (packet->credits_requested == 0) { |
		log_rdma_event(ERR, "error: credits_requested==0\n");
337 | return false; |
338 | } |
339 | info->receive_credit_target = le16_to_cpu(packet->credits_requested); |
340 | |
341 | if (packet->credits_granted == 0) { |
		log_rdma_event(ERR, "error: credits_granted==0\n");
343 | return false; |
344 | } |
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	atomic_set(&info->receive_credits, 0);
348 | |
349 | if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { |
		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
351 | le32_to_cpu(packet->preferred_send_size)); |
352 | return false; |
353 | } |
354 | info->max_receive_size = le32_to_cpu(packet->preferred_send_size); |
355 | |
356 | if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { |
		log_rdma_event(ERR, "error: max_receive_size=%d\n",
358 | le32_to_cpu(packet->max_receive_size)); |
359 | return false; |
360 | } |
361 | info->max_send_size = min_t(int, info->max_send_size, |
362 | le32_to_cpu(packet->max_receive_size)); |
363 | |
364 | if (le32_to_cpu(packet->max_fragmented_size) < |
365 | SMBD_MIN_FRAGMENTED_SIZE) { |
		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
367 | le32_to_cpu(packet->max_fragmented_size)); |
368 | return false; |
369 | } |
370 | info->max_fragmented_send_size = |
371 | le32_to_cpu(packet->max_fragmented_size); |
372 | info->rdma_readwrite_threshold = |
373 | rdma_readwrite_threshold > info->max_fragmented_send_size ? |
374 | info->max_fragmented_send_size : |
375 | rdma_readwrite_threshold; |
376 | |
377 | |
378 | info->max_readwrite_size = min_t(u32, |
379 | le32_to_cpu(packet->max_readwrite_size), |
380 | info->max_frmr_depth * PAGE_SIZE); |
381 | info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; |
382 | |
383 | return true; |
384 | } |
385 | |
386 | static void smbd_post_send_credits(struct work_struct *work) |
387 | { |
388 | int ret = 0; |
389 | int use_receive_queue = 1; |
390 | int rc; |
391 | struct smbd_response *response; |
392 | struct smbd_connection *info = |
393 | container_of(work, struct smbd_connection, |
394 | post_send_credits_work); |
395 | |
396 | if (info->transport_status != SMBD_CONNECTED) { |
397 | wake_up(&info->wait_receive_queues); |
398 | return; |
399 | } |
400 | |
	if (info->receive_credit_target >
		atomic_read(&info->receive_credits)) {
403 | while (true) { |
404 | if (use_receive_queue) |
405 | response = get_receive_buffer(info); |
406 | else |
407 | response = get_empty_queue_buffer(info); |
408 | if (!response) { |
				/* now switch to empty packet queue */
410 | if (use_receive_queue) { |
411 | use_receive_queue = 0; |
412 | continue; |
413 | } else |
414 | break; |
415 | } |
416 | |
417 | response->type = SMBD_TRANSFER_DATA; |
418 | response->first_segment = false; |
419 | rc = smbd_post_recv(info, response); |
420 | if (rc) { |
				log_rdma_recv(ERR,
					"post_recv failed rc=%d\n", rc);
423 | put_receive_buffer(info, response); |
424 | break; |
425 | } |
426 | |
427 | ret++; |
428 | } |
429 | } |
430 | |
	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);
434 | |
435 | /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ |
436 | info->send_immediate = true; |
	if (atomic_read(&info->receive_credits) <
438 | info->receive_credit_target - 1) { |
439 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING || |
440 | info->send_immediate) { |
			log_keep_alive(INFO, "send an empty message\n");
442 | smbd_post_send_empty(info); |
443 | } |
444 | } |
445 | } |
446 | |
447 | /* Called from softirq, when recv is done */ |
448 | static void recv_done(struct ib_cq *cq, struct ib_wc *wc) |
449 | { |
450 | struct smbd_data_transfer *data_transfer; |
451 | struct smbd_response *response = |
452 | container_of(wc->wr_cqe, struct smbd_response, cqe); |
453 | struct smbd_connection *info = response->info; |
454 | int data_length = 0; |
455 | |
	log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
457 | response, response->type, wc->status, wc->opcode, |
458 | wc->byte_len, wc->pkey_index); |
459 | |
460 | if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { |
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
462 | wc->status, wc->opcode); |
463 | smbd_disconnect_rdma_connection(info); |
464 | goto error; |
465 | } |
466 | |
	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);
472 | |
473 | switch (response->type) { |
474 | /* SMBD negotiation response */ |
475 | case SMBD_NEGOTIATE_RESP: |
		dump_smbd_negotiate_resp(smbd_response_payload(response));
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
480 | complete(&info->negotiate_completion); |
481 | break; |
482 | |
483 | /* SMBD data transfer packet */ |
484 | case SMBD_TRANSFER_DATA: |
485 | data_transfer = smbd_response_payload(response); |
486 | data_length = le32_to_cpu(data_transfer->data_length); |
487 | |
		/*
		 * If this is a packet with a data payload, place the data in
		 * the reassembly queue and wake up the reading thread
		 */
492 | if (data_length) { |
493 | if (info->full_packet_received) |
494 | response->first_segment = true; |
495 | |
496 | if (le32_to_cpu(data_transfer->remaining_data_length)) |
497 | info->full_packet_received = false; |
498 | else |
499 | info->full_packet_received = true; |
500 | |
501 | enqueue_reassembly( |
502 | info, |
503 | response, |
504 | data_length); |
505 | } else |
506 | put_empty_packet(info, response); |
507 | |
508 | if (data_length) |
509 | wake_up_interruptible(&info->wait_reassembly_queue); |
510 | |
		atomic_dec(&info->receive_credits);
512 | info->receive_credit_target = |
513 | le16_to_cpu(data_transfer->credits_requested); |
514 | if (le16_to_cpu(data_transfer->credits_granted)) { |
			atomic_add(le16_to_cpu(data_transfer->credits_granted),
				&info->send_credits);
517 | /* |
518 | * We have new send credits granted from remote peer |
519 | * If any sender is waiting for credits, unblock it |
520 | */ |
521 | wake_up_interruptible(&info->wait_send_queue); |
522 | } |
523 | |
		log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
525 | le16_to_cpu(data_transfer->flags), |
526 | le32_to_cpu(data_transfer->data_offset), |
527 | le32_to_cpu(data_transfer->data_length), |
528 | le32_to_cpu(data_transfer->remaining_data_length)); |
529 | |
530 | /* Send a KEEP_ALIVE response right away if requested */ |
531 | info->keep_alive_requested = KEEP_ALIVE_NONE; |
532 | if (le16_to_cpu(data_transfer->flags) & |
533 | SMB_DIRECT_RESPONSE_REQUESTED) { |
534 | info->keep_alive_requested = KEEP_ALIVE_PENDING; |
535 | } |
536 | |
537 | return; |
538 | |
539 | default: |
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
542 | } |
543 | |
544 | error: |
545 | put_receive_buffer(info, response); |
546 | } |
547 | |
548 | static struct rdma_cm_id *smbd_create_id( |
549 | struct smbd_connection *info, |
550 | struct sockaddr *dstaddr, int port) |
551 | { |
552 | struct rdma_cm_id *id; |
553 | int rc; |
554 | __be16 *sport; |
555 | |
	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
		RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
		return id;
	}

	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	*sport = htons(port);

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;

	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
		RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	/* e.g. if interrupted returns -ERESTARTSYS */
	if (rc < 0) {
		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
		goto out;
	}
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
		goto out;
	}

	info->ri_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	/* e.g. if interrupted returns -ERESTARTSYS */
	if (rc < 0) {
		log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc);
		goto out;
	}
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
		goto out;
	}

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
617 | } |
618 | |
619 | /* |
620 | * Test if FRWR (Fast Registration Work Requests) is supported on the device |
 * This implementation requires FRWR on RDMA read/write
622 | * return value: true if it is supported |
623 | */ |
624 | static bool frwr_is_supported(struct ib_device_attr *attrs) |
625 | { |
626 | if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) |
627 | return false; |
628 | if (attrs->max_fast_reg_page_list_len == 0) |
629 | return false; |
630 | return true; |
631 | } |
632 | |
633 | static int smbd_ia_open( |
634 | struct smbd_connection *info, |
635 | struct sockaddr *dstaddr, int port) |
636 | { |
637 | int rc; |
638 | |
639 | info->id = smbd_create_id(info, dstaddr, port); |
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
642 | goto out1; |
643 | } |
644 | |
	if (!frwr_is_supported(&info->id->device->attrs)) {
		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
650 | rc = -EPROTONOSUPPORT; |
651 | goto out2; |
652 | } |
653 | info->max_frmr_depth = min_t(int, |
654 | smbd_max_frmr_depth, |
655 | info->id->device->attrs.max_fast_reg_page_list_len); |
656 | info->mr_type = IB_MR_TYPE_MEM_REG; |
657 | if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) |
658 | info->mr_type = IB_MR_TYPE_SG_GAPS; |
659 | |
660 | info->pd = ib_alloc_pd(info->id->device, 0); |
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
664 | goto out2; |
665 | } |
666 | |
667 | return 0; |
668 | |
669 | out2: |
	rdma_destroy_id(info->id);
671 | info->id = NULL; |
672 | |
673 | out1: |
674 | return rc; |
675 | } |
676 | |
677 | /* |
678 | * Send a negotiation request message to the peer |
679 | * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3 |
680 | * After negotiation, the transport is connected and ready for |
681 | * carrying upper layer SMB payload |
682 | */ |
683 | static int smbd_post_send_negotiate_req(struct smbd_connection *info) |
684 | { |
685 | struct ib_send_wr send_wr; |
686 | int rc = -ENOMEM; |
687 | struct smbd_request *request; |
688 | struct smbd_negotiate_req *packet; |
689 | |
	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
691 | if (!request) |
692 | return rc; |
693 | |
694 | request->info = info; |
695 | |
696 | packet = smbd_request_payload(request); |
697 | packet->min_version = cpu_to_le16(SMBD_V1); |
698 | packet->max_version = cpu_to_le16(SMBD_V1); |
699 | packet->reserved = 0; |
700 | packet->credits_requested = cpu_to_le16(info->send_credit_target); |
701 | packet->preferred_send_size = cpu_to_le32(info->max_send_size); |
702 | packet->max_receive_size = cpu_to_le32(info->max_receive_size); |
703 | packet->max_fragmented_size = |
704 | cpu_to_le32(info->max_fragmented_recv_size); |
705 | |
706 | request->num_sge = 1; |
	request->sge[0].addr = ib_dma_map_single(
		info->id->device, (void *)packet,
		sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);
721 | |
722 | request->cqe.done = send_done; |
723 | |
724 | send_wr.next = NULL; |
725 | send_wr.wr_cqe = &request->cqe; |
726 | send_wr.sg_list = request->sge; |
727 | send_wr.num_sge = request->num_sge; |
728 | send_wr.opcode = IB_WR_SEND; |
729 | send_wr.send_flags = IB_SEND_SIGNALED; |
730 | |
	log_rdma_send(INFO, "sge addr=0x%llx length=%u lkey=0x%x\n",
732 | request->sge[0].addr, |
733 | request->sge[0].length, request->sge[0].lkey); |
734 | |
	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, NULL);
737 | if (!rc) |
738 | return 0; |
739 | |
740 | /* if we reach here, post send failed */ |
	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);
745 | |
746 | smbd_disconnect_rdma_connection(info); |
747 | |
748 | dma_mapping_failed: |
	mempool_free(request, info->request_mempool);
750 | return rc; |
751 | } |
752 | |
753 | /* |
754 | * Extend the credits to remote peer |
755 | * This implements [MS-SMBD] 3.1.5.9 |
756 | * The idea is that we should extend credits to remote peer as quickly as |
757 | * it's allowed, to maintain data flow. We allocate as much receive |
758 | * buffer as possible, and extend the receive credits to remote peer |
 * return value: the new credits being granted.
760 | */ |
761 | static int manage_credits_prior_sending(struct smbd_connection *info) |
762 | { |
763 | int new_credits; |
764 | |
	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);
769 | |
770 | return new_credits; |
771 | } |
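/*
 * The value returned above is advertised to the peer in the header of
 * the next send; see smbd_post_send_iter(), which does:
 *
 *	new_credits = manage_credits_prior_sending(info);
 *	atomic_add(new_credits, &info->receive_credits);
 *	packet->credits_granted = cpu_to_le16(new_credits);
 */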
772 | |
773 | /* |
774 | * Check if we need to send a KEEP_ALIVE message |
 * The idle connection timer triggers a KEEP_ALIVE message when it expires
 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer
 * send back a response.
778 | * return value: |
779 | * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set |
780 | * 0: otherwise |
781 | */ |
782 | static int manage_keep_alive_before_sending(struct smbd_connection *info) |
783 | { |
784 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { |
785 | info->keep_alive_requested = KEEP_ALIVE_SENT; |
786 | return 1; |
787 | } |
788 | return 0; |
789 | } |
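/*
 * The send path uses the return value to flag the outgoing packet; see
 * smbd_post_send_iter(), which does:
 *
 *	if (manage_keep_alive_before_sending(info))
 *		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
 */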
790 | |
791 | /* Post the send request */ |
792 | static int smbd_post_send(struct smbd_connection *info, |
793 | struct smbd_request *request) |
794 | { |
795 | struct ib_send_wr send_wr; |
796 | int rc, i; |
797 | |
798 | for (i = 0; i < request->num_sge; i++) { |
		log_rdma_send(INFO,
			"rdma_request sge[%d] addr=0x%llx length=%u\n",
			i, request->sge[i].addr, request->sge[i].length);
		ib_dma_sync_single_for_device(
			info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);
807 | } |
808 | |
809 | request->cqe.done = send_done; |
810 | |
811 | send_wr.next = NULL; |
812 | send_wr.wr_cqe = &request->cqe; |
813 | send_wr.sg_list = request->sge; |
814 | send_wr.num_sge = request->num_sge; |
815 | send_wr.opcode = IB_WR_SEND; |
816 | send_wr.send_flags = IB_SEND_SIGNALED; |
817 | |
	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (rc) {
		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
		smbd_disconnect_rdma_connection(info);
		rc = -EAGAIN;
	} else
		/* Reset timer for idle connection after packet is sent */
		mod_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);
827 | |
828 | return rc; |
829 | } |
830 | |
831 | static int smbd_post_send_iter(struct smbd_connection *info, |
832 | struct iov_iter *iter, |
833 | int *_remaining_data_length) |
834 | { |
	int i, rc;
	int header_length;
837 | int data_length; |
838 | struct smbd_request *request; |
839 | struct smbd_data_transfer *packet; |
840 | int new_credits = 0; |
841 | |
842 | wait_credit: |
843 | /* Wait for send credits. A SMBD packet needs one credit */ |
844 | rc = wait_event_interruptible(info->wait_send_queue, |
845 | atomic_read(&info->send_credits) > 0 || |
846 | info->transport_status != SMBD_CONNECTED); |
847 | if (rc) |
848 | goto err_wait_credit; |
849 | |
850 | if (info->transport_status != SMBD_CONNECTED) { |
		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
852 | rc = -EAGAIN; |
853 | goto err_wait_credit; |
854 | } |
	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
		atomic_inc(&info->send_credits);
857 | goto wait_credit; |
858 | } |
859 | |
860 | wait_send_queue: |
861 | wait_event(info->wait_post_send, |
862 | atomic_read(&info->send_pending) < info->send_credit_target || |
863 | info->transport_status != SMBD_CONNECTED); |
864 | |
865 | if (info->transport_status != SMBD_CONNECTED) { |
		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
867 | rc = -EAGAIN; |
868 | goto err_wait_send_queue; |
869 | } |
870 | |
871 | if (unlikely(atomic_inc_return(&info->send_pending) > |
872 | info->send_credit_target)) { |
		atomic_dec(&info->send_pending);
874 | goto wait_send_queue; |
875 | } |
876 | |
	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
878 | if (!request) { |
879 | rc = -ENOMEM; |
880 | goto err_alloc; |
881 | } |
882 | |
883 | request->info = info; |
884 | memset(request->sge, 0, sizeof(request->sge)); |
885 | |
886 | /* Fill in the data payload to find out how much data we can add */ |
887 | if (iter) { |
		struct smb_extract_to_rdma extract = {
889 | .nr_sge = 1, |
890 | .max_sge = SMBDIRECT_MAX_SEND_SGE, |
891 | .sge = request->sge, |
892 | .device = info->id->device, |
893 | .local_dma_lkey = info->pd->local_dma_lkey, |
894 | .direction = DMA_TO_DEVICE, |
895 | }; |
896 | |
		rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length,
					      &extract);
899 | if (rc < 0) |
900 | goto err_dma; |
901 | data_length = rc; |
902 | request->num_sge = extract.nr_sge; |
903 | *_remaining_data_length -= data_length; |
904 | } else { |
905 | data_length = 0; |
906 | request->num_sge = 1; |
907 | } |
908 | |
909 | /* Fill in the packet header */ |
910 | packet = smbd_request_payload(request); |
911 | packet->credits_requested = cpu_to_le16(info->send_credit_target); |
912 | |
913 | new_credits = manage_credits_prior_sending(info); |
	atomic_add(new_credits, &info->receive_credits);
915 | packet->credits_granted = cpu_to_le16(new_credits); |
916 | |
917 | info->send_immediate = false; |
918 | |
919 | packet->flags = 0; |
920 | if (manage_keep_alive_before_sending(info)) |
921 | packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); |
922 | |
923 | packet->reserved = 0; |
924 | if (!data_length) |
925 | packet->data_offset = 0; |
926 | else |
927 | packet->data_offset = cpu_to_le32(24); |
928 | packet->data_length = cpu_to_le32(data_length); |
929 | packet->remaining_data_length = cpu_to_le32(*_remaining_data_length); |
930 | packet->padding = 0; |
931 | |
	log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
933 | le16_to_cpu(packet->credits_requested), |
934 | le16_to_cpu(packet->credits_granted), |
935 | le32_to_cpu(packet->data_offset), |
936 | le32_to_cpu(packet->data_length), |
937 | le32_to_cpu(packet->remaining_data_length)); |
938 | |
939 | /* Map the packet to DMA */ |
940 | header_length = sizeof(struct smbd_data_transfer); |
941 | /* If this is a packet without payload, don't send padding */ |
942 | if (!data_length) |
943 | header_length = offsetof(struct smbd_data_transfer, padding); |
944 | |
	request->sge[0].addr = ib_dma_map_single(info->id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
950 | rc = -EIO; |
951 | request->sge[0].addr = 0; |
952 | goto err_dma; |
953 | } |
954 | |
955 | request->sge[0].length = header_length; |
956 | request->sge[0].lkey = info->pd->local_dma_lkey; |
957 | |
958 | rc = smbd_post_send(info, request); |
959 | if (!rc) |
960 | return 0; |
961 | |
962 | err_dma: |
	for (i = 0; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
					    request->sge[i].addr,
					    request->sge[i].length,
					    DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);
970 | |
971 | /* roll back receive credits and credits to be offered */ |
	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += new_credits;
	spin_unlock(&info->lock_new_credits_offered);
	atomic_sub(new_credits, &info->receive_credits);
976 | |
977 | err_alloc: |
	if (atomic_dec_and_test(&info->send_pending))
979 | wake_up(&info->wait_send_pending); |
980 | |
981 | err_wait_send_queue: |
982 | /* roll back send credits and pending */ |
	atomic_inc(&info->send_credits);
984 | |
985 | err_wait_credit: |
986 | return rc; |
987 | } |
988 | |
989 | /* |
 * Send an empty message
 * Empty messages are used to extend credits to the peer for keep-alive
 * while there is no upper layer payload to send at the time
993 | */ |
994 | static int smbd_post_send_empty(struct smbd_connection *info) |
995 | { |
996 | int remaining_data_length = 0; |
997 | |
998 | info->count_send_empty++; |
	return smbd_post_send_iter(info, NULL, &remaining_data_length);
1000 | } |
1001 | |
1002 | /* |
1003 | * Post a receive request to the transport |
1004 | * The remote peer can only send data when a receive request is posted |
1005 | * The interaction is controlled by send/receive credit system |
1006 | */ |
1007 | static int smbd_post_recv( |
1008 | struct smbd_connection *info, struct smbd_response *response) |
1009 | { |
1010 | struct ib_recv_wr recv_wr; |
1011 | int rc = -EIO; |
1012 | |
	response->sge.addr = ib_dma_map_single(
		info->id->device, response->packet,
		info->max_receive_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
1017 | return rc; |
1018 | |
1019 | response->sge.length = info->max_receive_size; |
1020 | response->sge.lkey = info->pd->local_dma_lkey; |
1021 | |
1022 | response->cqe.done = recv_done; |
1023 | |
1024 | recv_wr.wr_cqe = &response->cqe; |
1025 | recv_wr.next = NULL; |
1026 | recv_wr.sg_list = &response->sge; |
1027 | recv_wr.num_sge = 1; |
1028 | |
	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
			response->sge.length, DMA_FROM_DEVICE);
		smbd_disconnect_rdma_connection(info);
		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
1035 | } |
1036 | |
1037 | return rc; |
1038 | } |
1039 | |
1040 | /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */ |
1041 | static int smbd_negotiate(struct smbd_connection *info) |
1042 | { |
1043 | int rc; |
1044 | struct smbd_response *response = get_receive_buffer(info); |
1045 | |
1046 | response->type = SMBD_NEGOTIATE_RESP; |
1047 | rc = smbd_post_recv(info, response); |
	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
1049 | rc, response->sge.addr, |
1050 | response->sge.length, response->sge.lkey); |
1051 | if (rc) |
1052 | return rc; |
1053 | |
	init_completion(&info->negotiate_completion);
1055 | info->negotiate_done = false; |
1056 | rc = smbd_post_send_negotiate_req(info); |
1057 | if (rc) |
1058 | return rc; |
1059 | |
	rc = wait_for_completion_interruptible_timeout(
		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
1063 | |
1064 | if (info->negotiate_done) |
1065 | return 0; |
1066 | |
1067 | if (rc == 0) |
1068 | rc = -ETIMEDOUT; |
1069 | else if (rc == -ERESTARTSYS) |
1070 | rc = -EINTR; |
1071 | else |
1072 | rc = -ENOTCONN; |
1073 | |
1074 | return rc; |
1075 | } |
1076 | |
1077 | static void put_empty_packet( |
1078 | struct smbd_connection *info, struct smbd_response *response) |
1079 | { |
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
1086 | } |
1087 | |
1088 | /* |
1089 | * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1 |
 * This is a queue for reassembling upper layer payload and presenting it to
 * the upper layer. All the incoming payload goes to the reassembly queue,
 * regardless of whether reassembly is required. The upper layer code reads
 * from the queue for all incoming payloads.
1094 | * Put a received packet to the reassembly queue |
1095 | * response: the packet received |
1096 | * data_length: the size of payload in this packet |
1097 | */ |
1098 | static void enqueue_reassembly( |
1099 | struct smbd_connection *info, |
1100 | struct smbd_response *response, |
1101 | int data_length) |
1102 | { |
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
1105 | info->reassembly_queue_length++; |
1106 | /* |
1107 | * Make sure reassembly_data_length is updated after list and |
1108 | * reassembly_queue_length are updated. On the dequeue side |
1109 | * reassembly_data_length is checked without a lock to determine |
	 * if reassembly_queue_length and the list are up to date
1111 | */ |
1112 | virt_wmb(); |
1113 | info->reassembly_data_length += data_length; |
	spin_unlock(&info->reassembly_queue_lock);
1115 | info->count_reassembly_queue++; |
1116 | info->count_enqueue_reassembly_queue++; |
1117 | } |
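/*
 * Conceptually, the lock-free dequeue side pairs with the virt_wmb()
 * above like this (a sketch of the reader, not a verbatim quote):
 *
 *	if (info->reassembly_data_length >= size) {
 *		// Pairs with virt_wmb() in enqueue_reassembly(): the
 *		// queue entry is visible once the length is observed.
 *		virt_rmb();
 *		response = _get_first_reassembly(info);
 *		...
 *	}
 */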
1118 | |
1119 | /* |
1120 | * Get the first entry at the front of reassembly queue |
1121 | * Caller is responsible for locking |
1122 | * return value: the first entry if any, NULL if queue is empty |
1123 | */ |
1124 | static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) |
1125 | { |
1126 | struct smbd_response *ret = NULL; |
1127 | |
	if (!list_empty(&info->reassembly_queue)) {
1129 | ret = list_first_entry( |
1130 | &info->reassembly_queue, |
1131 | struct smbd_response, list); |
1132 | } |
1133 | return ret; |
1134 | } |
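/*
 * Typical caller pattern (the lock is the caller's responsibility), as
 * used by smbd_destroy() below:
 *
 *	spin_lock_irqsave(&info->reassembly_queue_lock, flags);
 *	response = _get_first_reassembly(info);
 *	if (response)
 *		list_del(&response->list);
 *	spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
 */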
1135 | |
1136 | static struct smbd_response *get_empty_queue_buffer( |
1137 | struct smbd_connection *info) |
1138 | { |
1139 | struct smbd_response *ret = NULL; |
1140 | unsigned long flags; |
1141 | |
1142 | spin_lock_irqsave(&info->empty_packet_queue_lock, flags); |
	if (!list_empty(&info->empty_packet_queue)) {
		ret = list_first_entry(
			&info->empty_packet_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_empty_packet_queue--;
	}
	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
1151 | |
1152 | return ret; |
1153 | } |
1154 | |
1155 | /* |
1156 | * Get a receive buffer |
1157 | * For each remote send, we need to post a receive. The receive buffers are |
 * pre-allocated.
1159 | * return value: the receive buffer, NULL if none is available |
1160 | */ |
1161 | static struct smbd_response *get_receive_buffer(struct smbd_connection *info) |
1162 | { |
1163 | struct smbd_response *ret = NULL; |
1164 | unsigned long flags; |
1165 | |
1166 | spin_lock_irqsave(&info->receive_queue_lock, flags); |
	if (!list_empty(&info->receive_queue)) {
		ret = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_receive_queue--;
		info->count_get_receive_buffer++;
	}
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
1176 | |
1177 | return ret; |
1178 | } |
1179 | |
1180 | /* |
1181 | * Return a receive buffer |
1182 | * Upon returning of a receive buffer, we can post new receive and extend |
1183 | * more receive credits to remote peer. This is done immediately after a |
1184 | * receive buffer is returned. |
1185 | */ |
1186 | static void put_receive_buffer( |
1187 | struct smbd_connection *info, struct smbd_response *response) |
1188 | { |
1189 | unsigned long flags; |
1190 | |
	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	queue_work(info->workqueue, &info->post_send_credits_work);
1201 | } |
1202 | |
/* Preallocate all receive buffers on transport establishment */
1204 | static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) |
1205 | { |
1206 | int i; |
1207 | struct smbd_response *response; |
1208 | |
	INIT_LIST_HEAD(&info->reassembly_queue);
1210 | spin_lock_init(&info->reassembly_queue_lock); |
1211 | info->reassembly_data_length = 0; |
1212 | info->reassembly_queue_length = 0; |
1213 | |
	INIT_LIST_HEAD(&info->receive_queue);
1215 | spin_lock_init(&info->receive_queue_lock); |
1216 | info->count_receive_queue = 0; |
1217 | |
	INIT_LIST_HEAD(&info->empty_packet_queue);
1219 | spin_lock_init(&info->empty_packet_queue_lock); |
1220 | info->count_empty_packet_queue = 0; |
1221 | |
1222 | init_waitqueue_head(&info->wait_receive_queues); |
1223 | |
1224 | for (i = 0; i < num_buf; i++) { |
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
1226 | if (!response) |
1227 | goto allocate_failed; |
1228 | |
1229 | response->info = info; |
		list_add_tail(&response->list, &info->receive_queue);
1231 | info->count_receive_queue++; |
1232 | } |
1233 | |
1234 | return 0; |
1235 | |
1236 | allocate_failed: |
	while (!list_empty(&info->receive_queue)) {
1238 | response = list_first_entry( |
1239 | &info->receive_queue, |
1240 | struct smbd_response, list); |
		list_del(&response->list);
1242 | info->count_receive_queue--; |
1243 | |
		mempool_free(response, info->response_mempool);
1245 | } |
1246 | return -ENOMEM; |
1247 | } |
1248 | |
1249 | static void destroy_receive_buffers(struct smbd_connection *info) |
1250 | { |
1251 | struct smbd_response *response; |
1252 | |
	while ((response = get_receive_buffer(info)))
		mempool_free(response, info->response_mempool);

	while ((response = get_empty_queue_buffer(info)))
		mempool_free(response, info->response_mempool);
1258 | } |
1259 | |
1260 | /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */ |
1261 | static void idle_connection_timer(struct work_struct *work) |
1262 | { |
1263 | struct smbd_connection *info = container_of( |
1264 | work, struct smbd_connection, |
1265 | idle_timer_work.work); |
1266 | |
1267 | if (info->keep_alive_requested != KEEP_ALIVE_NONE) { |
1268 | log_keep_alive(ERR, |
			"error status info->keep_alive_requested=%d\n",
1270 | info->keep_alive_requested); |
1271 | smbd_disconnect_rdma_connection(info); |
1272 | return; |
1273 | } |
1274 | |
	log_keep_alive(INFO, "about to send an empty idle message\n");
1276 | smbd_post_send_empty(info); |
1277 | |
1278 | /* Setup the next idle timeout work */ |
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);
1281 | } |
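/*
 * Note that the send path re-arms this timer on every transmission; see
 * smbd_post_send(), which does:
 *
 *	mod_delayed_work(info->workqueue, &info->idle_timer_work,
 *		info->keep_alive_interval*HZ);
 */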
1282 | |
1283 | /* |
1284 | * Destroy the transport and related RDMA and memory resources |
 * Need to go through all the pending counters and make sure no one is using
1286 | * the transport while it is destroyed |
1287 | */ |
1288 | void smbd_destroy(struct TCP_Server_Info *server) |
1289 | { |
1290 | struct smbd_connection *info = server->smbd_conn; |
1291 | struct smbd_response *response; |
1292 | unsigned long flags; |
1293 | |
1294 | if (!info) { |
		log_rdma_event(INFO, "rdma session already destroyed\n");
1296 | return; |
1297 | } |
1298 | |
	log_rdma_event(INFO, "destroying rdma session\n");
	if (info->transport_status != SMBD_DISCONNECTED) {
		rdma_disconnect(server->smbd_conn->id);
		log_rdma_event(INFO, "wait for transport being disconnected\n");
		wait_event_interruptible(
			info->disconn_wait,
			info->transport_status == SMBD_DISCONNECTED);
	}

	log_rdma_event(INFO, "destroying qp\n");
	ib_drain_qp(info->id->qp);
	rdma_destroy_qp(info->id);

	log_rdma_event(INFO, "cancelling idle timer\n");
	cancel_delayed_work_sync(&info->idle_timer_work);
1314 | |
	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
1316 | wait_event(info->wait_send_pending, |
1317 | atomic_read(&info->send_pending) == 0); |
1318 | |
1319 | /* It's not possible for upper layer to get to reassembly */ |
	log_rdma_event(INFO, "drain the reassembly queue\n");
	do {
		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
		response = _get_first_reassembly(info);
		if (response) {
			list_del(&response->list);
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
			put_receive_buffer(info, response);
		} else
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
1332 | } while (response); |
1333 | info->reassembly_data_length = 0; |
1334 | |
	log_rdma_event(INFO, "free receive buffers\n");
1336 | wait_event(info->wait_receive_queues, |
1337 | info->count_receive_queue + info->count_empty_packet_queue |
1338 | == info->receive_credit_max); |
1339 | destroy_receive_buffers(info); |
1340 | |
	/*
	 * For performance reasons, memory registration and deregistration
	 * are not locked by srv_mutex. It is possible some processes are
	 * blocked on transport srv_mutex while holding memory registration.
	 * Release the transport srv_mutex to allow them to hit the failure
	 * path when sending data, and then release memory registrations.
	 */
	log_rdma_event(INFO, "freeing mr list\n");
	wake_up_interruptible_all(&info->wait_mr);
	while (atomic_read(&info->mr_used_count)) {
		cifs_server_unlock(server);
		msleep(1000);
		cifs_server_lock(server);
	}
	destroy_mr_list(info);

	ib_free_cq(info->send_cq);
	ib_free_cq(info->recv_cq);
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

	/* free mempools */
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);

	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);

	info->transport_status = SMBD_DESTROYED;

	destroy_workqueue(info->workqueue);
	log_rdma_event(INFO, "rdma session destroyed\n");
	kfree(info);
1374 | server->smbd_conn = NULL; |
1375 | } |
1376 | |
1377 | /* |
1378 | * Reconnect this SMBD connection, called from upper layer |
1379 | * return value: 0 on success, or actual error code |
1380 | */ |
1381 | int smbd_reconnect(struct TCP_Server_Info *server) |
1382 | { |
	log_rdma_event(INFO, "reconnecting rdma session\n");
1384 | |
1385 | if (!server->smbd_conn) { |
		log_rdma_event(INFO, "rdma session already destroyed\n");
1387 | goto create_conn; |
1388 | } |
1389 | |
	/*
	 * This is possible if the transport is disconnected and we haven't
	 * received a notification from RDMA, but the upper layer has detected
	 * a timeout
	 */
1394 | if (server->smbd_conn->transport_status == SMBD_CONNECTED) { |
		log_rdma_event(INFO, "disconnecting transport\n");
1396 | smbd_destroy(server); |
1397 | } |
1398 | |
1399 | create_conn: |
	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);
1403 | |
1404 | if (server->smbd_conn) { |
		cifs_dbg(VFS, "RDMA transport re-established\n");
		trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr);
1407 | return 0; |
1408 | } |
	trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr);
1410 | return -ENOENT; |
1411 | } |
1412 | |
1413 | static void destroy_caches_and_workqueue(struct smbd_connection *info) |
1414 | { |
1415 | destroy_receive_buffers(info); |
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
1421 | } |
1422 | |
1423 | #define MAX_NAME_LEN 80 |
1424 | static int allocate_caches_and_workqueue(struct smbd_connection *info) |
1425 | { |
1426 | char name[MAX_NAME_LEN]; |
1427 | int rc; |
1428 | |
	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
1467 | if (rc) { |
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
1469 | goto out5; |
1470 | } |
1471 | |
1472 | return 0; |
1473 | |
1474 | out5: |
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
1484 | return -ENOMEM; |
1485 | } |
1486 | |
1487 | /* Create a SMBD connection, called by upper layer */ |
1488 | static struct smbd_connection *_smbd_get_connection( |
1489 | struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port) |
1490 | { |
1491 | int rc; |
1492 | struct smbd_connection *info; |
1493 | struct rdma_conn_param conn_param; |
1494 | struct ib_qp_init_attr qp_attr; |
1495 | struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; |
1496 | struct ib_port_immutable port_immutable; |
1497 | u32 ird_ord_hdr[2]; |
1498 | |
	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
1500 | if (!info) |
1501 | return NULL; |
1502 | |
1503 | info->transport_status = SMBD_CONNECTING; |
1504 | rc = smbd_ia_open(info, dstaddr, port); |
1505 | if (rc) { |
		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
1507 | goto create_id_failed; |
1508 | } |
1509 | |
1510 | if (smbd_send_credit_target > info->id->device->attrs.max_cqe || |
1511 | smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { |
		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1513 | smbd_send_credit_target, |
1514 | info->id->device->attrs.max_cqe, |
1515 | info->id->device->attrs.max_qp_wr); |
1516 | goto config_failed; |
1517 | } |
1518 | |
1519 | if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || |
1520 | smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { |
		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1522 | smbd_receive_credit_max, |
1523 | info->id->device->attrs.max_cqe, |
1524 | info->id->device->attrs.max_qp_wr); |
1525 | goto config_failed; |
1526 | } |
1527 | |
1528 | info->receive_credit_max = smbd_receive_credit_max; |
1529 | info->send_credit_target = smbd_send_credit_target; |
1530 | info->max_send_size = smbd_max_send_size; |
1531 | info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; |
1532 | info->max_receive_size = smbd_max_receive_size; |
1533 | info->keep_alive_interval = smbd_keep_alive_interval; |
1534 | |
1535 | if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || |
1536 | info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { |
1537 | log_rdma_event(ERR, |
			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
1539 | IB_DEVICE_NAME_MAX, |
1540 | info->id->device->name, |
1541 | info->id->device->attrs.max_send_sge, |
1542 | info->id->device->attrs.max_recv_sge); |
1543 | goto config_failed; |
1544 | } |
1545 | |
1546 | info->send_cq = NULL; |
1547 | info->recv_cq = NULL; |
	info->send_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->send_credit_target, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->receive_credit_max, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
1562 | } |
1563 | |
1564 | memset(&qp_attr, 0, sizeof(qp_attr)); |
1565 | qp_attr.event_handler = smbd_qp_async_error_upcall; |
1566 | qp_attr.qp_context = info; |
1567 | qp_attr.cap.max_send_wr = info->send_credit_target; |
1568 | qp_attr.cap.max_recv_wr = info->receive_credit_max; |
1569 | qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE; |
1570 | qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE; |
1571 | qp_attr.cap.max_inline_data = 0; |
1572 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
1573 | qp_attr.qp_type = IB_QPT_RC; |
1574 | qp_attr.send_cq = info->send_cq; |
1575 | qp_attr.recv_cq = info->recv_cq; |
1576 | qp_attr.port_num = ~0; |
1577 | |
	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
	if (rc) {
		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
1581 | goto create_qp_failed; |
1582 | } |
1583 | |
1584 | memset(&conn_param, 0, sizeof(conn_param)); |
1585 | conn_param.initiator_depth = 0; |
1586 | |
1587 | conn_param.responder_resources = |
1588 | info->id->device->attrs.max_qp_rd_atom |
1589 | < SMBD_CM_RESPONDER_RESOURCES ? |
1590 | info->id->device->attrs.max_qp_rd_atom : |
1591 | SMBD_CM_RESPONDER_RESOURCES; |
1592 | info->responder_resources = conn_param.responder_resources; |
1593 | log_rdma_mr(INFO, "responder_resources=%d\n" , |
1594 | info->responder_resources); |
1595 | |
1596 | /* Need to send IRD/ORD in private data for iWARP */ |
1597 | info->id->device->ops.get_port_immutable( |
1598 | info->id->device, info->id->port_num, &port_immutable); |
1599 | if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { |
1600 | ird_ord_hdr[0] = info->responder_resources; |
1601 | ird_ord_hdr[1] = 1; |
1602 | conn_param.private_data = ird_ord_hdr; |
1603 | conn_param.private_data_len = sizeof(ird_ord_hdr); |
1604 | } else { |
1605 | conn_param.private_data = NULL; |
1606 | conn_param.private_data_len = 0; |
1607 | } |
1608 | |
1609 | conn_param.retry_count = SMBD_CM_RETRY; |
1610 | conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY; |
1611 | conn_param.flow_control = 0; |
1612 | |
1613 | log_rdma_event(INFO, "connecting to IP %pI4 port %d\n" , |
1614 | &addr_in->sin_addr, port); |
1615 | |
1616 | init_waitqueue_head(&info->conn_wait); |
1617 | init_waitqueue_head(&info->disconn_wait); |
1618 | init_waitqueue_head(&info->wait_reassembly_queue); |
	rc = rdma_connect(info->id, &conn_param);
	if (rc) {
		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
1622 | goto rdma_connect_failed; |
1623 | } |
1624 | |
1625 | wait_event_interruptible( |
1626 | info->conn_wait, info->transport_status != SMBD_CONNECTING); |
1627 | |
1628 | if (info->transport_status != SMBD_CONNECTED) { |
1629 | log_rdma_event(ERR, "rdma_connect failed port=%d\n" , port); |
1630 | goto rdma_connect_failed; |
1631 | } |
1632 | |
1633 | log_rdma_event(INFO, "rdma_connect connected\n" ); |
1634 | |
1635 | rc = allocate_caches_and_workqueue(info); |
1636 | if (rc) { |
1637 | log_rdma_event(ERR, "cache allocation failed\n" ); |
1638 | goto allocate_cache_failed; |
1639 | } |
1640 | |
1641 | init_waitqueue_head(&info->wait_send_queue); |
1642 | INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); |
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
			   info->keep_alive_interval*HZ);
1645 | |
1646 | init_waitqueue_head(&info->wait_send_pending); |
	atomic_set(&info->send_pending, 0);
1648 | |
1649 | init_waitqueue_head(&info->wait_post_send); |
1650 | |
1651 | INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); |
1652 | INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); |
1653 | info->new_credits_offered = 0; |
1654 | spin_lock_init(&info->lock_new_credits_offered); |
1655 | |
1656 | rc = smbd_negotiate(info); |
1657 | if (rc) { |
1658 | log_rdma_event(ERR, "smbd_negotiate rc=%d\n" , rc); |
1659 | goto negotiation_failed; |
1660 | } |
1661 | |
1662 | rc = allocate_mr_list(info); |
1663 | if (rc) { |
1664 | log_rdma_mr(ERR, "memory registration allocation failed\n" ); |
1665 | goto allocate_mr_failed; |
1666 | } |
1667 | |
1668 | return info; |
1669 | |
1670 | allocate_mr_failed: |
	/* At this point, we need a full transport shutdown */
1672 | server->smbd_conn = info; |
1673 | smbd_destroy(server); |
1674 | return NULL; |
1675 | |
1676 | negotiation_failed: |
	cancel_delayed_work_sync(&info->idle_timer_work);
1678 | destroy_caches_and_workqueue(info); |
1679 | info->transport_status = SMBD_NEGOTIATE_FAILED; |
1680 | init_waitqueue_head(&info->conn_wait); |
	rdma_disconnect(info->id);
1682 | wait_event(info->conn_wait, |
1683 | info->transport_status == SMBD_DISCONNECTED); |
1684 | |
1685 | allocate_cache_failed: |
1686 | rdma_connect_failed: |
	rdma_destroy_qp(info->id);
1688 | |
1689 | create_qp_failed: |
1690 | alloc_cq_failed: |
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);
1695 | |
1696 | config_failed: |
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);
1699 | |
1700 | create_id_failed: |
	kfree(info);
1702 | return NULL; |
1703 | } |
1704 | |
1705 | struct smbd_connection *smbd_get_connection( |
1706 | struct TCP_Server_Info *server, struct sockaddr *dstaddr) |
1707 | { |
1708 | struct smbd_connection *ret; |
1709 | int port = SMBD_PORT; |
1710 | |
1711 | try_again: |
1712 | ret = _smbd_get_connection(server, dstaddr, port); |
1713 | |
1714 | /* Try SMB_PORT if SMBD_PORT doesn't work */ |
1715 | if (!ret && port == SMBD_PORT) { |
1716 | port = SMB_PORT; |
1717 | goto try_again; |
1718 | } |
1719 | return ret; |
1720 | } |
1721 | |
1722 | /* |
1723 | * Receive data from receive reassembly queue |
1724 | * All the incoming data packets are placed in reassembly queue |
1725 | * buf: the buffer to read data into |
1726 | * size: the length of data to read |
1727 | * return value: actual data read |
 * Note: this implementation copies the data from the reassembly queue to the
 * receive buffers used by the upper layer. This is not the optimal code path.
 * A better way would be for the upper layer to borrow the buffer from the
 * reassembly queue instead of allocating its own, and return it after the
 * data is consumed. But that requires more changes to the upper layer code,
 * and packet boundaries must be considered while packets are still being
 * reassembled.
1734 | */ |
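/*
 * For example (illustrative numbers only): if the first packet of a
 * response carries data_length = 1364 and remaining_data_length = 2732,
 * an initial 4-byte read returns the RFC1002-style total of 4096, and
 * subsequent reads copy the 4096 payload bytes out of the queued packets.
 */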
1735 | static int smbd_recv_buf(struct smbd_connection *info, char *buf, |
1736 | unsigned int size) |
1737 | { |
1738 | struct smbd_response *response; |
1739 | struct smbd_data_transfer *data_transfer; |
1740 | int to_copy, to_read, data_read, offset; |
1741 | u32 data_length, remaining_data_length, data_offset; |
1742 | int rc; |
1743 | |
1744 | again: |
1745 | /* |
1746 | * No need to hold the reassembly queue lock all the time as we are |
1747 | * the only one reading from the front of the queue. The transport |
1748 | * may add more entries to the back of the queue at the same time |
1749 | */ |
1750 | log_read(INFO, "size=%d info->reassembly_data_length=%d\n" , size, |
1751 | info->reassembly_data_length); |
1752 | if (info->reassembly_data_length >= size) { |
1753 | int queue_length; |
1754 | int queue_removed = 0; |
1755 | |
1756 | /* |
1757 | * Need to make sure reassembly_data_length is read before |
1758 | * reading reassembly_queue_length and calling |
1759 | * _get_first_reassembly. This call is lock free |
		 * as we never read at the end of the queue, which is being
		 * updated in SOFTIRQ as more data is received
1762 | */ |
1763 | virt_rmb(); |
1764 | queue_length = info->reassembly_queue_length; |
1765 | data_read = 0; |
1766 | to_read = size; |
1767 | offset = info->first_entry_offset; |
1768 | while (data_read < size) { |
1769 | response = _get_first_reassembly(info); |
1770 | data_transfer = smbd_response_payload(response); |
1771 | data_length = le32_to_cpu(data_transfer->data_length); |
1772 | remaining_data_length = |
1773 | le32_to_cpu( |
1774 | data_transfer->remaining_data_length); |
1775 | data_offset = le32_to_cpu(data_transfer->data_offset); |
1776 | |
1777 | /* |
1778 | * The upper layer expects RFC1002 length at the |
1779 | * beginning of the payload. Return it to indicate |
			 * the total length of the packet. This minimizes the
			 * changes to the upper layer packet processing logic.
			 * This will eventually be removed when an
			 * intermediate transport layer is added
1784 | */ |
1785 | if (response->first_segment && size == 4) { |
1786 | unsigned int rfc1002_len = |
1787 | data_length + remaining_data_length; |
1788 | *((__be32 *)buf) = cpu_to_be32(rfc1002_len); |
1789 | data_read = 4; |
1790 | response->first_segment = false; |
1791 | log_read(INFO, "returning rfc1002 length %d\n" , |
1792 | rfc1002_len); |
1793 | goto read_rfc1002_done; |
1794 | } |
1795 | |
1796 | to_copy = min_t(int, data_length - offset, to_read); |
1797 | memcpy( |
1798 | buf + data_read, |
1799 | (char *)data_transfer + data_offset + offset, |
1800 | to_copy); |
1801 | |
1802 | /* move on to the next buffer? */ |
1803 | if (to_copy == data_length - offset) { |
1804 | queue_length--; |
1805 | /* |
1806 | * No need to lock if we are not at the |
1807 | * end of the queue |
1808 | */ |
1809 | if (queue_length) |
					list_del(&response->list);
				else {
					spin_lock_irq(
						&info->reassembly_queue_lock);
					list_del(&response->list);
					spin_unlock_irq(
						&info->reassembly_queue_lock);
1817 | } |
1818 | queue_removed++; |
1819 | info->count_reassembly_queue--; |
1820 | info->count_dequeue_reassembly_queue++; |
1821 | put_receive_buffer(info, response); |
1822 | offset = 0; |
1823 | log_read(INFO, "put_receive_buffer offset=0\n" ); |
1824 | } else |
1825 | offset += to_copy; |
1826 | |
1827 | to_read -= to_copy; |
1828 | data_read += to_copy; |
1829 | |
1830 | log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n" , |
1831 | to_copy, data_length - offset, |
1832 | to_read, data_read, offset); |
1833 | } |
1834 | |
		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);
1839 | |
1840 | info->first_entry_offset = offset; |
1841 | log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n" , |
1842 | data_read, info->reassembly_data_length, |
1843 | info->first_entry_offset); |
1844 | read_rfc1002_done: |
1845 | return data_read; |
1846 | } |
1847 | |
1848 | log_read(INFO, "wait_event on more data\n" ); |
1849 | rc = wait_event_interruptible( |
1850 | info->wait_reassembly_queue, |
1851 | info->reassembly_data_length >= size || |
1852 | info->transport_status != SMBD_CONNECTED); |
1853 | /* Don't return any data if interrupted */ |
1854 | if (rc) |
1855 | return rc; |
1856 | |
1857 | if (info->transport_status != SMBD_CONNECTED) { |
1858 | log_read(ERR, "disconnected\n" ); |
1859 | return -ECONNABORTED; |
1860 | } |
1861 | |
1862 | goto again; |
1863 | } |
1864 | |
1865 | /* |
1866 | * Receive a page from receive reassembly queue |
1867 | * page: the page to read data into |
1868 | * to_read: the length of data to read |
1869 | * return value: actual data read |
1870 | */ |
1871 | static int smbd_recv_page(struct smbd_connection *info, |
1872 | struct page *page, unsigned int page_offset, |
1873 | unsigned int to_read) |
1874 | { |
1875 | int ret; |
1876 | char *to_address; |
1877 | void *page_address; |
1878 | |
1879 | /* make sure we have the page ready for read */ |
1880 | ret = wait_event_interruptible( |
1881 | info->wait_reassembly_queue, |
1882 | info->reassembly_data_length >= to_read || |
1883 | info->transport_status != SMBD_CONNECTED); |
1884 | if (ret) |
1885 | return ret; |
1886 | |
1887 | /* now we can read from reassembly queue and not sleep */ |
1888 | page_address = kmap_atomic(page); |
1889 | to_address = (char *) page_address + page_offset; |
1890 | |
1891 | log_read(INFO, "reading from page=%p address=%p to_read=%d\n" , |
1892 | page, to_address, to_read); |
1893 | |
	ret = smbd_recv_buf(info, to_address, to_read);
1895 | kunmap_atomic(page_address); |
1896 | |
1897 | return ret; |
1898 | } |
1899 | |
1900 | /* |
1901 | * Receive data from transport |
 * msg: a msghdr pointing to the buffer; can be ITER_KVEC or ITER_BVEC
 * return: total bytes read, or 0. SMB Direct will not do a partial read.
1904 | */ |
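/*
 * A hypothetical caller sketch (illustrative only; the real caller sits in
 * the CIFS transport code):
 *
 *	struct kvec iov = { .iov_base = buf, .iov_len = 4 };
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
 *	rc = smbd_recv(info, &msg);
 */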
1905 | int smbd_recv(struct smbd_connection *info, struct msghdr *msg) |
1906 | { |
1907 | char *buf; |
1908 | struct page *page; |
1909 | unsigned int to_read, page_offset; |
1910 | int rc; |
1911 | |
	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
		/* It's a bug in the upper layer to get here */
		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
			 iov_iter_rw(&msg->msg_iter));
1916 | rc = -EINVAL; |
1917 | goto out; |
1918 | } |
1919 | |
	switch (iov_iter_type(&msg->msg_iter)) {
1921 | case ITER_KVEC: |
1922 | buf = msg->msg_iter.kvec->iov_base; |
1923 | to_read = msg->msg_iter.kvec->iov_len; |
		rc = smbd_recv_buf(info, buf, to_read);
1925 | break; |
1926 | |
1927 | case ITER_BVEC: |
1928 | page = msg->msg_iter.bvec->bv_page; |
1929 | page_offset = msg->msg_iter.bvec->bv_offset; |
1930 | to_read = msg->msg_iter.bvec->bv_len; |
1931 | rc = smbd_recv_page(info, page, page_offset, to_read); |
1932 | break; |
1933 | |
1934 | default: |
		/* It's a bug in the upper layer to get here */
		cifs_dbg(VFS, "Invalid msg type %d\n",
1937 | iov_iter_type(&msg->msg_iter)); |
1938 | rc = -EINVAL; |
1939 | } |
1940 | |
1941 | out: |
1942 | /* SMBDirect will read it all or nothing */ |
1943 | if (rc > 0) |
1944 | msg->msg_iter.count = 0; |
1945 | return rc; |
1946 | } |
1947 | |
1948 | /* |
1949 | * Send data to transport |
1950 | * Each rqst is transported as a SMBDirect payload |
1951 | * rqst: the data to write |
 * return value: 0 if successfully written, otherwise error code
1953 | */ |
1954 | int smbd_send(struct TCP_Server_Info *server, |
1955 | int num_rqst, struct smb_rqst *rqst_array) |
1956 | { |
1957 | struct smbd_connection *info = server->smbd_conn; |
1958 | struct smb_rqst *rqst; |
1959 | struct iov_iter iter; |
1960 | unsigned int remaining_data_length, klen; |
1961 | int rc, i, rqst_idx; |
1962 | |
1963 | if (info->transport_status != SMBD_CONNECTED) |
1964 | return -EAGAIN; |
1965 | |
1966 | /* |
1967 | * Add in the page array if there is one. The caller needs to set |
1968 | * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and |
1969 | * ends at page boundary |
1970 | */ |
1971 | remaining_data_length = 0; |
1972 | for (i = 0; i < num_rqst; i++) |
		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
1974 | |
1975 | if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { |
1976 | /* assertion: payload never exceeds negotiated maximum */ |
1977 | log_write(ERR, "payload size %d > max size %d\n" , |
1978 | remaining_data_length, info->max_fragmented_send_size); |
1979 | return -EINVAL; |
1980 | } |
1981 | |
1982 | log_write(INFO, "num_rqst=%d total length=%u\n" , |
1983 | num_rqst, remaining_data_length); |
1984 | |
1985 | rqst_idx = 0; |
1986 | do { |
1987 | rqst = &rqst_array[rqst_idx]; |
1988 | |
1989 | cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n" , |
1990 | rqst_idx, smb_rqst_len(server, rqst)); |
1991 | for (i = 0; i < rqst->rq_nvec; i++) |
1992 | dump_smb(rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); |
1993 | |
1994 | log_write(INFO, "RDMA-WR[%u] nvec=%d len=%u iter=%zu rqlen=%lu\n" , |
1995 | rqst_idx, rqst->rq_nvec, remaining_data_length, |
1996 | iov_iter_count(&rqst->rq_iter), smb_rqst_len(server, rqst)); |
1997 | |
1998 | /* Send the metadata pages. */ |
1999 | klen = 0; |
2000 | for (i = 0; i < rqst->rq_nvec; i++) |
2001 | klen += rqst->rq_iov[i].iov_len; |
		iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
2003 | |
		rc = smbd_post_send_iter(info, &iter, &remaining_data_length);
2005 | if (rc < 0) |
2006 | break; |
2007 | |
		if (iov_iter_count(&rqst->rq_iter) > 0) {
			/* And then the data pages if there are any */
			rc = smbd_post_send_iter(info, &rqst->rq_iter,
						 &remaining_data_length);
2012 | if (rc < 0) |
2013 | break; |
2014 | } |
2015 | |
2016 | } while (++rqst_idx < num_rqst); |
2017 | |
	/*
	 * As an optimization, we don't wait for individual I/Os to finish
	 * before sending the next one. Send them all and wait for the
	 * pending send count to get to 0, which means all the I/Os have
	 * gone out and we are good to return.
	 */
2024 | |
2025 | wait_event(info->wait_send_pending, |
2026 | atomic_read(&info->send_pending) == 0); |
2027 | |
2028 | return rc; |
2029 | } |
2030 | |
2031 | static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) |
2032 | { |
2033 | struct smbd_mr *mr; |
2034 | struct ib_cqe *cqe; |
2035 | |
2036 | if (wc->status) { |
2037 | log_rdma_mr(ERR, "status=%d\n" , wc->status); |
2038 | cqe = wc->wr_cqe; |
2039 | mr = container_of(cqe, struct smbd_mr, cqe); |
2040 | smbd_disconnect_rdma_connection(info: mr->conn); |
2041 | } |
2042 | } |
2043 | |
2044 | /* |
2045 | * The work queue function that recovers MRs |
2046 | * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used |
 * again. Both calls are slow, so we finish them in a workqueue. This will not
 * block the I/O path.
2049 | * There is one workqueue that recovers MRs, there is no need to lock as the |
2050 | * I/O requests calling smbd_register_mr will never update the links in the |
2051 | * mr_list. |
2052 | */ |
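/*
 * MR state transitions as used in this file (a summary, derived from the
 * code below):
 *
 *	MR_READY       -> MR_REGISTERED   get_mr() hands the MR to an I/O
 *	MR_REGISTERED  -> MR_INVALIDATED  local/remote invalidation completed
 *	MR_REGISTERED  -> MR_ERROR        registration or invalidation failed
 *	MR_INVALIDATED -> MR_READY        unmapped in smbd_deregister_mr()
 *	MR_ERROR       -> MR_READY        recovered here, in the workqueue
 */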
2053 | static void smbd_mr_recovery_work(struct work_struct *work) |
2054 | { |
2055 | struct smbd_connection *info = |
2056 | container_of(work, struct smbd_connection, mr_recovery_work); |
2057 | struct smbd_mr *smbdirect_mr; |
2058 | int rc; |
2059 | |
2060 | list_for_each_entry(smbdirect_mr, &info->mr_list, list) { |
2061 | if (smbdirect_mr->state == MR_ERROR) { |
2062 | |
2063 | /* recover this MR entry */ |
			rc = ib_dereg_mr(smbdirect_mr->mr);
			if (rc) {
				log_rdma_mr(ERR,
					"ib_dereg_mr failed rc=%x\n",
					rc);
				smbd_disconnect_rdma_connection(info);
				continue;
			}

			smbdirect_mr->mr = ib_alloc_mr(
				info->pd, info->mr_type,
				info->max_frmr_depth);
			if (IS_ERR(smbdirect_mr->mr)) {
				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
2078 | info->mr_type, |
2079 | info->max_frmr_depth); |
2080 | smbd_disconnect_rdma_connection(info); |
2081 | continue; |
2082 | } |
2083 | } else |
2084 | /* This MR is being used, don't recover it */ |
2085 | continue; |
2086 | |
2087 | smbdirect_mr->state = MR_READY; |
2088 | |
		/* smbdirect_mr->state is updated by this function
		 * and is read and updated by the I/O issuing CPUs trying
		 * to get an MR. The call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr() from the I/O issuing CPUs
		 */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
2097 | wake_up_interruptible(&info->wait_mr); |
2098 | } |
2099 | } |
2100 | |
2101 | static void destroy_mr_list(struct smbd_connection *info) |
2102 | { |
2103 | struct smbd_mr *mr, *tmp; |
2104 | |
	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
				mr->sgt.nents, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgt.sgl);
		kfree(mr);
2113 | } |
2114 | } |
2115 | |
2116 | /* |
2117 | * Allocate MRs used for RDMA read/write |
2118 | * The number of MRs will not exceed hardware capability in responder_resources |
2119 | * All MRs are kept in mr_list. The MR can be recovered after it's used |
2120 | * Recovery is done in smbd_mr_recovery_work. The content of list entry changes |
2121 | * as MRs are used and recovered for I/O, but the list links will not change |
2122 | */ |
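/*
 * E.g. with responder_resources negotiated as 32, the loop below
 * preallocates 64 MRs (illustrative; the actual count depends on the
 * device's max_qp_rd_atom).
 */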
2123 | static int allocate_mr_list(struct smbd_connection *info) |
2124 | { |
2125 | int i; |
2126 | struct smbd_mr *smbdirect_mr, *tmp; |
2127 | |
	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
2133 | init_waitqueue_head(&info->wait_for_mr_cleanup); |
2134 | INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); |
2135 | /* Allocate more MRs (2x) than hardware responder_resources */ |
2136 | for (i = 0; i < info->responder_resources * 2; i++) { |
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto cleanup_entries;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
					info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
				    info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth,
						sizeof(struct scatterlist),
						GFP_KERNEL);
		if (!smbdirect_mr->sgt.sgl) {
			log_rdma_mr(ERR, "failed to allocate sgl\n");
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
2160 | } |
2161 | return 0; |
2162 | |
2163 | out: |
	kfree(smbdirect_mr);
cleanup_entries:
	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		list_del(&smbdirect_mr->list);
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgt.sgl);
		kfree(smbdirect_mr);
2171 | } |
2172 | return -ENOMEM; |
2173 | } |
2174 | |
2175 | /* |
2176 | * Get a MR from mr_list. This function waits until there is at least one |
2177 | * MR available in the list. It may access the list while the |
2178 | * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock |
2179 | * as they never modify the same places. However, there may be several CPUs |
 * issuing I/O trying to get an MR at the same time; mr_list_lock is used to
 * protect against this situation.
2182 | */ |
2183 | static struct smbd_mr *get_mr(struct smbd_connection *info) |
2184 | { |
2185 | struct smbd_mr *ret; |
2186 | int rc; |
2187 | again: |
2188 | rc = wait_event_interruptible(info->wait_mr, |
2189 | atomic_read(&info->mr_ready_count) || |
2190 | info->transport_status != SMBD_CONNECTED); |
2191 | if (rc) { |
2192 | log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n" , rc); |
2193 | return NULL; |
2194 | } |
2195 | |
2196 | if (info->transport_status != SMBD_CONNECTED) { |
2197 | log_rdma_mr(ERR, "info->transport_status=%x\n" , |
2198 | info->transport_status); |
2199 | return NULL; |
2200 | } |
2201 | |
	spin_lock(&info->mr_list_lock);
	list_for_each_entry(ret, &info->mr_list, list) {
		if (ret->state == MR_READY) {
			ret->state = MR_REGISTERED;
			spin_unlock(&info->mr_list_lock);
			atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
2209 | return ret; |
2210 | } |
2211 | } |
2212 | |
	spin_unlock(&info->mr_list_lock);
	/*
	 * It is possible that we could fail to get an MR because other
	 * processes may try to acquire one at the same time. If this is the
	 * case, retry.
	 */
2218 | goto again; |
2219 | } |
2220 | |
2221 | /* |
2222 | * Transcribe the pages from an iterator into an MR scatterlist. |
2223 | */ |
2224 | static int smbd_iter_to_mr(struct smbd_connection *info, |
2225 | struct iov_iter *iter, |
2226 | struct sg_table *sgt, |
2227 | unsigned int max_sg) |
2228 | { |
2229 | int ret; |
2230 | |
2231 | memset(sgt->sgl, 0, max_sg * sizeof(struct scatterlist)); |
2232 | |
	ret = extract_iter_to_sg(iter, iov_iter_count(iter), sgt, max_sg, 0);
	WARN_ON(ret < 0);
	if (sgt->nents > 0)
		sg_mark_end(&sgt->sgl[sgt->nents - 1]);
2237 | return ret; |
2238 | } |
2239 | |
2240 | /* |
2241 | * Register memory for RDMA read/write |
2242 | * iter: the buffer to register memory with |
2243 | * writing: true if this is a RDMA write (SMB read), false for RDMA read |
2244 | * need_invalidate: true if this MR needs to be locally invalidated after I/O |
2245 | * return value: the MR registered, NULL if failed. |
2246 | */ |
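/*
 * A sketch of the expected calling pattern (illustrative only; the real
 * callers live in the SMB2 read/write paths):
 *
 *	mr = smbd_register_mr(info, &iter, true, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	(describe mr->mr->rkey, mr->mr->iova and mr->mr->length to the
 *	 peer, then wait for the RDMA I/O to complete)
 *	smbd_deregister_mr(mr);
 */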
2247 | struct smbd_mr *smbd_register_mr(struct smbd_connection *info, |
2248 | struct iov_iter *iter, |
2249 | bool writing, bool need_invalidate) |
2250 | { |
2251 | struct smbd_mr *smbdirect_mr; |
2252 | int rc, num_pages; |
2253 | enum dma_data_direction dir; |
2254 | struct ib_reg_wr *reg_wr; |
2255 | |
	num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1);
	if (num_pages > info->max_frmr_depth) {
		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
2259 | num_pages, info->max_frmr_depth); |
2260 | WARN_ON_ONCE(1); |
2261 | return NULL; |
2262 | } |
2263 | |
2264 | smbdirect_mr = get_mr(info); |
2265 | if (!smbdirect_mr) { |
2266 | log_rdma_mr(ERR, "get_mr returning NULL\n" ); |
2267 | return NULL; |
2268 | } |
2269 | |
2270 | dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2271 | smbdirect_mr->dir = dir; |
2272 | smbdirect_mr->need_invalidate = need_invalidate; |
2273 | smbdirect_mr->sgt.nents = 0; |
2274 | smbdirect_mr->sgt.orig_nents = 0; |
2275 | |
2276 | log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n" , |
2277 | num_pages, iov_iter_count(iter), info->max_frmr_depth); |
2278 | smbd_iter_to_mr(info, iter, sgt: &smbdirect_mr->sgt, max_sg: info->max_frmr_depth); |
2279 | |
2280 | rc = ib_dma_map_sg(dev: info->id->device, sg: smbdirect_mr->sgt.sgl, |
2281 | nents: smbdirect_mr->sgt.nents, direction: dir); |
2282 | if (!rc) { |
2283 | log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n" , |
2284 | num_pages, dir, rc); |
2285 | goto dma_map_error; |
2286 | } |
2287 | |
	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl,
			  smbdirect_mr->sgt.nents, NULL, PAGE_SIZE);
2290 | if (rc != smbdirect_mr->sgt.nents) { |
2291 | log_rdma_mr(ERR, |
2292 | "ib_map_mr_sg failed rc = %d nents = %x\n" , |
2293 | rc, smbdirect_mr->sgt.nents); |
2294 | goto map_mr_error; |
2295 | } |
2296 | |
	ib_update_fast_reg_key(smbdirect_mr->mr,
			       ib_inc_rkey(smbdirect_mr->mr->rkey));
2299 | reg_wr = &smbdirect_mr->wr; |
2300 | reg_wr->wr.opcode = IB_WR_REG_MR; |
2301 | smbdirect_mr->cqe.done = register_mr_done; |
2302 | reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; |
2303 | reg_wr->wr.num_sge = 0; |
2304 | reg_wr->wr.send_flags = IB_SEND_SIGNALED; |
2305 | reg_wr->mr = smbdirect_mr->mr; |
2306 | reg_wr->key = smbdirect_mr->mr->rkey; |
2307 | reg_wr->access = writing ? |
2308 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : |
2309 | IB_ACCESS_REMOTE_READ; |
2310 | |
2311 | /* |
	 * There is no need to wait for completion of ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to the remote peer
2315 | */ |
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
2317 | if (!rc) |
2318 | return smbdirect_mr; |
2319 | |
2320 | log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n" , |
2321 | rc, reg_wr->key); |
2322 | |
/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
2324 | map_mr_error: |
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
			smbdirect_mr->sgt.nents, smbdirect_mr->dir);
2327 | |
2328 | dma_map_error: |
2329 | smbdirect_mr->state = MR_ERROR; |
	if (atomic_dec_and_test(&info->mr_used_count))
2331 | wake_up(&info->wait_for_mr_cleanup); |
2332 | |
2333 | smbd_disconnect_rdma_connection(info); |
2334 | |
2335 | return NULL; |
2336 | } |
2337 | |
2338 | static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) |
2339 | { |
2340 | struct smbd_mr *smbdirect_mr; |
2341 | struct ib_cqe *cqe; |
2342 | |
2343 | cqe = wc->wr_cqe; |
2344 | smbdirect_mr = container_of(cqe, struct smbd_mr, cqe); |
2345 | smbdirect_mr->state = MR_INVALIDATED; |
2346 | if (wc->status != IB_WC_SUCCESS) { |
2347 | log_rdma_mr(ERR, "invalidate failed status=%x\n" , wc->status); |
2348 | smbdirect_mr->state = MR_ERROR; |
2349 | } |
2350 | complete(&smbdirect_mr->invalidate_done); |
2351 | } |
2352 | |
2353 | /* |
 * Deregister an MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent the data from
 * being modified by the remote peer after the upper layer consumes it
2358 | */ |
2359 | int smbd_deregister_mr(struct smbd_mr *smbdirect_mr) |
2360 | { |
2361 | struct ib_send_wr *wr; |
2362 | struct smbd_connection *info = smbdirect_mr->conn; |
2363 | int rc = 0; |
2364 | |
2365 | if (smbdirect_mr->need_invalidate) { |
2366 | /* Need to finish local invalidation before returning */ |
2367 | wr = &smbdirect_mr->inv_wr; |
2368 | wr->opcode = IB_WR_LOCAL_INV; |
2369 | smbdirect_mr->cqe.done = local_inv_done; |
2370 | wr->wr_cqe = &smbdirect_mr->cqe; |
2371 | wr->num_sge = 0; |
2372 | wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; |
2373 | wr->send_flags = IB_SEND_SIGNALED; |
2374 | |
		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, NULL);
		if (rc) {
			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
2379 | smbd_disconnect_rdma_connection(info); |
2380 | goto done; |
2381 | } |
2382 | wait_for_completion(&smbdirect_mr->invalidate_done); |
2383 | smbdirect_mr->need_invalidate = false; |
2384 | } else |
2385 | /* |
2386 | * For remote invalidation, just set it to MR_INVALIDATED |
2387 | * and defer to mr_recovery_work to recover the MR for next use |
2388 | */ |
2389 | smbdirect_mr->state = MR_INVALIDATED; |
2390 | |
2391 | if (smbdirect_mr->state == MR_INVALIDATED) { |
		ib_dma_unmap_sg(
			info->id->device, smbdirect_mr->sgt.sgl,
			smbdirect_mr->sgt.nents,
			smbdirect_mr->dir);
		smbdirect_mr->state = MR_READY;
		if (atomic_inc_return(&info->mr_ready_count) == 1)
2398 | wake_up_interruptible(&info->wait_mr); |
2399 | } else |
		/*
		 * Schedule the work to do MR recovery for future I/Os. MR
		 * recovery is slow and we don't want it to block the
		 * current I/O.
		 */
		queue_work(info->workqueue, &info->mr_recovery_work);
2405 | |
2406 | done: |
	if (atomic_dec_and_test(&info->mr_used_count))
2408 | wake_up(&info->wait_for_mr_cleanup); |
2409 | |
2410 | return rc; |
2411 | } |
2412 | |
2413 | static bool smb_set_sge(struct smb_extract_to_rdma *rdma, |
2414 | struct page *lowest_page, size_t off, size_t len) |
2415 | { |
2416 | struct ib_sge *sge = &rdma->sge[rdma->nr_sge]; |
2417 | u64 addr; |
2418 | |
	addr = ib_dma_map_page(rdma->device, lowest_page,
			       off, len, rdma->direction);
	if (ib_dma_mapping_error(rdma->device, addr))
2422 | return false; |
2423 | |
2424 | sge->addr = addr; |
2425 | sge->length = len; |
2426 | sge->lkey = rdma->local_dma_lkey; |
2427 | rdma->nr_sge++; |
2428 | return true; |
2429 | } |
2430 | |
2431 | /* |
2432 | * Extract page fragments from a BVEC-class iterator and add them to an RDMA |
2433 | * element list. The pages are not pinned. |
2434 | */ |
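/*
 * For example, with bvecs of 3 and 5 bytes and iter->iov_offset == 4, the
 * first bvec is skipped entirely and extraction starts 1 byte into the
 * second one (illustrative numbers only).
 */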
static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
2436 | struct smb_extract_to_rdma *rdma, |
2437 | ssize_t maxsize) |
2438 | { |
2439 | const struct bio_vec *bv = iter->bvec; |
2440 | unsigned long start = iter->iov_offset; |
2441 | unsigned int i; |
2442 | ssize_t ret = 0; |
2443 | |
2444 | for (i = 0; i < iter->nr_segs; i++) { |
2445 | size_t off, len; |
2446 | |
2447 | len = bv[i].bv_len; |
2448 | if (start >= len) { |
2449 | start -= len; |
2450 | continue; |
2451 | } |
2452 | |
2453 | len = min_t(size_t, maxsize, len - start); |
2454 | off = bv[i].bv_offset + start; |
2455 | |
		if (!smb_set_sge(rdma, bv[i].bv_page, off, len))
2457 | return -EIO; |
2458 | |
2459 | ret += len; |
2460 | maxsize -= len; |
2461 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) |
2462 | break; |
2463 | start = 0; |
2464 | } |
2465 | |
2466 | return ret; |
2467 | } |
2468 | |
2469 | /* |
2470 | * Extract fragments from a KVEC-class iterator and add them to an RDMA list. |
2471 | * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers. |
2472 | * The pages are not pinned. |
2473 | */ |
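/*
 * A single kvec may span several pages, so it is emitted as one SGE per
 * page it touches: e.g. with 4KiB pages, a 6000-byte buffer starting 256
 * bytes before a page boundary becomes SGEs of 256, 4096 and 1648 bytes
 * (illustrative numbers only).
 */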
static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
2475 | struct smb_extract_to_rdma *rdma, |
2476 | ssize_t maxsize) |
2477 | { |
2478 | const struct kvec *kv = iter->kvec; |
2479 | unsigned long start = iter->iov_offset; |
2480 | unsigned int i; |
2481 | ssize_t ret = 0; |
2482 | |
2483 | for (i = 0; i < iter->nr_segs; i++) { |
2484 | struct page *page; |
2485 | unsigned long kaddr; |
2486 | size_t off, len, seg; |
2487 | |
2488 | len = kv[i].iov_len; |
2489 | if (start >= len) { |
2490 | start -= len; |
2491 | continue; |
2492 | } |
2493 | |
2494 | kaddr = (unsigned long)kv[i].iov_base + start; |
2495 | off = kaddr & ~PAGE_MASK; |
2496 | len = min_t(size_t, maxsize, len - start); |
2497 | kaddr &= PAGE_MASK; |
2498 | |
2499 | maxsize -= len; |
2500 | do { |
2501 | seg = min_t(size_t, len, PAGE_SIZE - off); |
2502 | |
			if (is_vmalloc_or_module_addr((void *)kaddr))
				page = vmalloc_to_page((void *)kaddr);
			else
				page = virt_to_page((void *)kaddr);

			if (!smb_set_sge(rdma, page, off, seg))
2509 | return -EIO; |
2510 | |
2511 | ret += seg; |
2512 | len -= seg; |
2513 | kaddr += PAGE_SIZE; |
2514 | off = 0; |
2515 | } while (len > 0 && rdma->nr_sge < rdma->max_sge); |
2516 | |
2517 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) |
2518 | break; |
2519 | start = 0; |
2520 | } |
2521 | |
2522 | return ret; |
2523 | } |
2524 | |
2525 | /* |
2526 | * Extract folio fragments from an XARRAY-class iterator and add them to an |
2527 | * RDMA list. The folios are not pinned. |
2528 | */ |
static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter,
2530 | struct smb_extract_to_rdma *rdma, |
2531 | ssize_t maxsize) |
2532 | { |
2533 | struct xarray *xa = iter->xarray; |
2534 | struct folio *folio; |
2535 | loff_t start = iter->xarray_start + iter->iov_offset; |
2536 | pgoff_t index = start / PAGE_SIZE; |
2537 | ssize_t ret = 0; |
2538 | size_t off, len; |
2539 | XA_STATE(xas, xa, index); |
2540 | |
2541 | rcu_read_lock(); |
2542 | |
2543 | xas_for_each(&xas, folio, ULONG_MAX) { |
		if (xas_retry(&xas, folio))
2545 | continue; |
2546 | if (WARN_ON(xa_is_value(folio))) |
2547 | break; |
2548 | if (WARN_ON(folio_test_hugetlb(folio))) |
2549 | break; |
2550 | |
2551 | off = offset_in_folio(folio, start); |
2552 | len = min_t(size_t, maxsize, folio_size(folio) - off); |
2553 | |
2554 | if (!smb_set_sge(rdma, folio_page(folio, 0), off, len)) { |
2555 | rcu_read_unlock(); |
2556 | return -EIO; |
2557 | } |
2558 | |
2559 | maxsize -= len; |
2560 | ret += len; |
2561 | if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) |
2562 | break; |
2563 | } |
2564 | |
2565 | rcu_read_unlock(); |
2566 | return ret; |
2567 | } |
2568 | |
2569 | /* |
2570 | * Extract page fragments from up to the given amount of the source iterator |
2571 | * and build up an RDMA list that refers to all of those bits. The RDMA list |
2572 | * is appended to, up to the maximum number of elements set in the parameter |
2573 | * block. |
2574 | * |
2575 | * The extracted page fragments are not pinned or ref'd in any way; if an |
2576 | * IOVEC/UBUF-type iterator is to be used, it should be converted to a |
2577 | * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some |
2578 | * way. |
2579 | */ |
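/*
 * A sketch of how a caller might fill the parameter block (field names as
 * used by smb_set_sge() above; values are illustrative):
 *
 *	struct ib_sge sges[16];
 *	struct smb_extract_to_rdma rdma = {
 *		.sge		= sges,
 *		.nr_sge		= 0,
 *		.max_sge	= ARRAY_SIZE(sges),
 *		.device		= info->id->device,
 *		.local_dma_lkey	= info->pd->local_dma_lkey,
 *		.direction	= DMA_TO_DEVICE,
 *	};
 */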
static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
2581 | struct smb_extract_to_rdma *rdma) |
2582 | { |
2583 | ssize_t ret; |
2584 | int before = rdma->nr_sge; |
2585 | |
	switch (iov_iter_type(iter)) {
	case ITER_BVEC:
		ret = smb_extract_bvec_to_rdma(iter, rdma, len);
		break;
	case ITER_KVEC:
		ret = smb_extract_kvec_to_rdma(iter, rdma, len);
		break;
	case ITER_XARRAY:
		ret = smb_extract_xarray_to_rdma(iter, rdma, len);
2595 | break; |
2596 | default: |
2597 | WARN_ON_ONCE(1); |
2598 | return -EIO; |
2599 | } |
2600 | |
	if (ret > 0) {
		iov_iter_advance(iter, ret);
	} else if (ret < 0) {
		/* Unwind: unmap every SGE this call added */
		while (rdma->nr_sge > before) {
			struct ib_sge *sge = &rdma->sge[--rdma->nr_sge];

			ib_dma_unmap_single(rdma->device, sge->addr, sge->length,
					    rdma->direction);
2609 | sge->addr = 0; |
2610 | } |
2611 | } |
2612 | |
2613 | return ret; |
2614 | } |
2615 | |