1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * |
4 | * Copyright (C) 2004 Oracle. All rights reserved. |
5 | * |
6 | * ---- |
7 | * |
 * Callers for this were originally written against a very simple synchronous
9 | * API. This implementation reflects those simple callers. Some day I'm sure |
10 | * we'll need to move to a more robust posting/callback mechanism. |
11 | * |
12 | * Transmit calls pass in kernel virtual addresses and block copying this into |
 * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
 * for a failed socket to time out. TX callers can also pass in a pointer to an
15 | * 'int' which gets filled with an errno off the wire in response to the |
16 | * message they send. |
17 | * |
18 | * Handlers for unsolicited messages are registered. Each socket has a page |
19 | * that incoming data is copied into. First the header, then the data. |
20 | * Handlers are called from only one thread with a reference to this per-socket |
21 | * page. This page is destroyed after the handler call, so it can't be |
22 | * referenced beyond the call. Handlers may block but are discouraged from |
23 | * doing so. |
24 | * |
25 | * Any framing errors (bad magic, large payload lengths) close a connection. |
26 | * |
 * Our sock_container holds the state we associate with a socket. Its current
28 | * framing state is held there as well as the refcounting we do around when it |
29 | * is safe to tear down the socket. The socket is only finally torn down from |
30 | * the container when the container loses all of its references -- so as long |
31 | * as you hold a ref on the container you can trust that the socket is valid |
32 | * for use with kernel socket APIs. |
33 | * |
34 | * Connections are initiated between a pair of nodes when the node with the |
35 | * higher node number gets a heartbeat callback which indicates that the lower |
36 | * numbered node has started heartbeating. The lower numbered node is passive |
37 | * and only accepts the connection if the higher numbered node is heartbeating. |
38 | */ |
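
/*
 * Illustrative usage sketch (not part of this file): the message type, key
 * and payload struct below are hypothetical examples; real users such as the
 * DLM and heartbeat code define their own.  A caller typically registers a
 * handler once at init time and then sends messages with o2net_send_message(),
 * checking both the local return code and the status written by the remote
 * handler:
 *
 *	#define EXAMPLE_MSG_TYPE	0x1
 *	#define EXAMPLE_KEY		0x12345678
 *
 *	struct example_payload {
 *		__be32	value;
 *	};
 *
 *	static int example_handler(struct o2net_msg *msg, u32 len, void *data,
 *				   void **ret_data)
 *	{
 *		struct example_payload *p = (struct example_payload *)msg->buf;
 *
 *		return be32_to_cpu(p->value) ? 0 : -EINVAL;
 *	}
 *
 *	static LIST_HEAD(example_unreg_list);
 *
 *	static int example_send(u8 target_node)
 *	{
 *		struct example_payload p = { .value = cpu_to_be32(1), };
 *		int status = 0, ret;
 *
 *		ret = o2net_register_handler(EXAMPLE_MSG_TYPE, EXAMPLE_KEY,
 *					     sizeof(struct example_payload),
 *					     example_handler, NULL, NULL,
 *					     &example_unreg_list);
 *		if (ret)
 *			return ret;
 *
 *		ret = o2net_send_message(EXAMPLE_MSG_TYPE, EXAMPLE_KEY, &p,
 *					 sizeof(p), target_node, &status);
 *		if (ret)
 *			return ret;
 *		return status;
 *	}
 *
 * The handler's return value travels back to the sender as the wire status;
 * unregistration happens via o2net_unregister_handler_list(&example_unreg_list).
 */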
39 | |
40 | #include <linux/kernel.h> |
41 | #include <linux/sched/mm.h> |
42 | #include <linux/jiffies.h> |
43 | #include <linux/slab.h> |
44 | #include <linux/idr.h> |
45 | #include <linux/kref.h> |
46 | #include <linux/net.h> |
47 | #include <linux/export.h> |
48 | #include <net/tcp.h> |
49 | #include <trace/events/sock.h> |
50 | |
51 | #include <linux/uaccess.h> |
52 | |
53 | #include "heartbeat.h" |
54 | #include "tcp.h" |
55 | #include "nodemanager.h" |
56 | #define MLOG_MASK_PREFIX ML_TCP |
57 | #include "masklog.h" |
58 | #include "quorum.h" |
59 | |
60 | #include "tcp_internal.h" |
61 | |
62 | #define SC_NODEF_FMT "node %s (num %u) at %pI4:%u" |
63 | #define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \ |
64 | &sc->sc_node->nd_ipv4_address, \ |
65 | ntohs(sc->sc_node->nd_ipv4_port) |
66 | |
67 | /* |
68 | * In the following two log macros, the whitespace after the ',' just |
69 | * before ##args is intentional. Otherwise, gcc 2.95 will eat the |
70 | * previous token if args expands to nothing. |
71 | */ |
72 | #define msglog(hdr, fmt, args...) do { \ |
73 | typeof(hdr) __hdr = (hdr); \ |
74 | mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \ |
75 | "key %08x num %u] " fmt, \ |
76 | be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \ |
77 | be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \ |
78 | be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \ |
79 | be32_to_cpu(__hdr->msg_num) , ##args); \ |
80 | } while (0) |
81 | |
82 | #define sclog(sc, fmt, args...) do { \ |
83 | typeof(sc) __sc = (sc); \ |
84 | mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \ |
85 | "pg_off %zu] " fmt, __sc, \ |
86 | kref_read(&__sc->sc_kref), __sc->sc_sock, \ |
87 | __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \ |
88 | ##args); \ |
89 | } while (0) |
90 | |
91 | static DEFINE_RWLOCK(o2net_handler_lock); |
92 | static struct rb_root o2net_handler_tree = RB_ROOT; |
93 | |
94 | static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; |
95 | |
96 | /* XXX someday we'll need better accounting */ |
97 | static struct socket *o2net_listen_sock; |
98 | |
99 | /* |
100 | * listen work is only queued by the listening socket callbacks on the |
101 | * o2net_wq. teardown detaches the callbacks before destroying the workqueue. |
 * quorum work is queued as sock containers are shut down. stop_listening
 * tears down all the node's sock containers, preventing future shutdowns
 * and queued quorum work, before canceling delayed quorum work and
105 | * destroying the work queue. |
106 | */ |
107 | static struct workqueue_struct *o2net_wq; |
108 | static struct work_struct o2net_listen_work; |
109 | |
110 | static struct o2hb_callback_func o2net_hb_up, o2net_hb_down; |
111 | #define O2NET_HB_PRI 0x1 |
112 | |
113 | static struct o2net_handshake *o2net_hand; |
114 | static struct o2net_msg *o2net_keep_req, *o2net_keep_resp; |
115 | |
116 | static int o2net_sys_err_translations[O2NET_ERR_MAX] = |
117 | {[O2NET_ERR_NONE] = 0, |
118 | [O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT, |
119 | [O2NET_ERR_OVERFLOW] = -EOVERFLOW, |
120 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; |
121 | |
122 | /* can't quite avoid *all* internal declarations :/ */ |
123 | static void o2net_sc_connect_completed(struct work_struct *work); |
124 | static void o2net_rx_until_empty(struct work_struct *work); |
125 | static void o2net_shutdown_sc(struct work_struct *work); |
126 | static void o2net_listen_data_ready(struct sock *sk); |
127 | static void o2net_sc_send_keep_req(struct work_struct *work); |
128 | static void o2net_idle_timer(struct timer_list *t); |
129 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); |
130 | static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc); |
131 | |
132 | #ifdef CONFIG_DEBUG_FS |
133 | static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, |
134 | u32 msgkey, struct task_struct *task, u8 node) |
135 | { |
	INIT_LIST_HEAD(&nst->st_net_debug_item);
137 | nst->st_task = task; |
138 | nst->st_msg_type = msgtype; |
139 | nst->st_msg_key = msgkey; |
140 | nst->st_node = node; |
141 | } |
142 | |
143 | static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) |
144 | { |
145 | nst->st_sock_time = ktime_get(); |
146 | } |
147 | |
148 | static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) |
149 | { |
150 | nst->st_send_time = ktime_get(); |
151 | } |
152 | |
153 | static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) |
154 | { |
155 | nst->st_status_time = ktime_get(); |
156 | } |
157 | |
158 | static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, |
159 | struct o2net_sock_container *sc) |
160 | { |
161 | nst->st_sc = sc; |
162 | } |
163 | |
164 | static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, |
165 | u32 msg_id) |
166 | { |
167 | nst->st_id = msg_id; |
168 | } |
169 | |
170 | static inline void o2net_set_sock_timer(struct o2net_sock_container *sc) |
171 | { |
172 | sc->sc_tv_timer = ktime_get(); |
173 | } |
174 | |
175 | static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc) |
176 | { |
177 | sc->sc_tv_data_ready = ktime_get(); |
178 | } |
179 | |
180 | static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc) |
181 | { |
182 | sc->sc_tv_advance_start = ktime_get(); |
183 | } |
184 | |
185 | static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc) |
186 | { |
187 | sc->sc_tv_advance_stop = ktime_get(); |
188 | } |
189 | |
190 | static inline void o2net_set_func_start_time(struct o2net_sock_container *sc) |
191 | { |
192 | sc->sc_tv_func_start = ktime_get(); |
193 | } |
194 | |
195 | static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) |
196 | { |
197 | sc->sc_tv_func_stop = ktime_get(); |
198 | } |
199 | |
200 | #else /* CONFIG_DEBUG_FS */ |
201 | # define o2net_init_nst(a, b, c, d, e) |
202 | # define o2net_set_nst_sock_time(a) |
203 | # define o2net_set_nst_send_time(a) |
204 | # define o2net_set_nst_status_time(a) |
205 | # define o2net_set_nst_sock_container(a, b) |
206 | # define o2net_set_nst_msg_id(a, b) |
207 | # define o2net_set_sock_timer(a) |
208 | # define o2net_set_data_ready_time(a) |
209 | # define o2net_set_advance_start_time(a) |
210 | # define o2net_set_advance_stop_time(a) |
211 | # define o2net_set_func_start_time(a) |
212 | # define o2net_set_func_stop_time(a) |
213 | #endif /* CONFIG_DEBUG_FS */ |
214 | |
215 | #ifdef CONFIG_OCFS2_FS_STATS |
216 | static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) |
217 | { |
218 | return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); |
219 | } |
220 | |
221 | static void o2net_update_send_stats(struct o2net_send_tracking *nst, |
222 | struct o2net_sock_container *sc) |
223 | { |
224 | sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total, |
225 | ktime_sub(ktime_get(), |
226 | nst->st_status_time)); |
227 | sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total, |
228 | ktime_sub(nst->st_status_time, |
229 | nst->st_send_time)); |
230 | sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total, |
231 | ktime_sub(nst->st_send_time, |
232 | nst->st_sock_time)); |
233 | sc->sc_send_count++; |
234 | } |
235 | |
236 | static void o2net_update_recv_stats(struct o2net_sock_container *sc) |
237 | { |
238 | sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total, |
239 | o2net_get_func_run_time(sc)); |
240 | sc->sc_recv_count++; |
241 | } |
242 | |
243 | #else |
244 | |
245 | # define o2net_update_send_stats(a, b) |
246 | |
247 | # define o2net_update_recv_stats(sc) |
248 | |
249 | #endif /* CONFIG_OCFS2_FS_STATS */ |
250 | |
251 | static inline unsigned int o2net_reconnect_delay(void) |
252 | { |
253 | return o2nm_single_cluster->cl_reconnect_delay_ms; |
254 | } |
255 | |
256 | static inline unsigned int o2net_keepalive_delay(void) |
257 | { |
258 | return o2nm_single_cluster->cl_keepalive_delay_ms; |
259 | } |
260 | |
261 | static inline unsigned int o2net_idle_timeout(void) |
262 | { |
263 | return o2nm_single_cluster->cl_idle_timeout_ms; |
264 | } |
265 | |
266 | static inline int o2net_sys_err_to_errno(enum o2net_system_error err) |
267 | { |
268 | int trans; |
269 | BUG_ON(err >= O2NET_ERR_MAX); |
270 | trans = o2net_sys_err_translations[err]; |
271 | |
272 | /* Just in case we mess up the translation table above */ |
273 | BUG_ON(err != O2NET_ERR_NONE && trans == 0); |
274 | return trans; |
275 | } |
276 | |
277 | static struct o2net_node * o2net_nn_from_num(u8 node_num) |
278 | { |
279 | BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes)); |
280 | return &o2net_nodes[node_num]; |
281 | } |
282 | |
283 | static u8 o2net_num_from_nn(struct o2net_node *nn) |
284 | { |
285 | BUG_ON(nn == NULL); |
286 | return nn - o2net_nodes; |
287 | } |
288 | |
289 | /* ------------------------------------------------------------ */ |
290 | |
291 | static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw) |
292 | { |
293 | int ret; |
294 | |
	spin_lock(&nn->nn_lock);
	ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
	if (ret >= 0) {
		nsw->ns_id = ret;
		list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
	}
	spin_unlock(&nn->nn_lock);
302 | if (ret < 0) |
303 | return ret; |
304 | |
305 | init_waitqueue_head(&nsw->ns_wq); |
306 | nsw->ns_sys_status = O2NET_ERR_NONE; |
307 | nsw->ns_status = 0; |
308 | return 0; |
309 | } |
310 | |
311 | static void o2net_complete_nsw_locked(struct o2net_node *nn, |
312 | struct o2net_status_wait *nsw, |
313 | enum o2net_system_error sys_status, |
314 | s32 status) |
315 | { |
316 | assert_spin_locked(&nn->nn_lock); |
317 | |
	if (!list_empty(&nsw->ns_node_item)) {
		list_del_init(&nsw->ns_node_item);
		nsw->ns_sys_status = sys_status;
		nsw->ns_status = status;
		idr_remove(&nn->nn_status_idr, nsw->ns_id);
323 | wake_up(&nsw->ns_wq); |
324 | } |
325 | } |
326 | |
327 | static void o2net_complete_nsw(struct o2net_node *nn, |
328 | struct o2net_status_wait *nsw, |
329 | u64 id, enum o2net_system_error sys_status, |
330 | s32 status) |
331 | { |
	spin_lock(&nn->nn_lock);
333 | if (nsw == NULL) { |
334 | if (id > INT_MAX) |
335 | goto out; |
336 | |
337 | nsw = idr_find(&nn->nn_status_idr, id); |
338 | if (nsw == NULL) |
339 | goto out; |
340 | } |
341 | |
342 | o2net_complete_nsw_locked(nn, nsw, sys_status, status); |
343 | |
344 | out: |
	spin_unlock(&nn->nn_lock);
346 | return; |
347 | } |
348 | |
349 | static void o2net_complete_nodes_nsw(struct o2net_node *nn) |
350 | { |
351 | struct o2net_status_wait *nsw, *tmp; |
352 | unsigned int num_kills = 0; |
353 | |
354 | assert_spin_locked(&nn->nn_lock); |
355 | |
356 | list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) { |
		o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
		num_kills++;
	}

	mlog(0, "completed %d messages for node %u\n", num_kills,
362 | o2net_num_from_nn(nn)); |
363 | } |
364 | |
365 | static int o2net_nsw_completed(struct o2net_node *nn, |
366 | struct o2net_status_wait *nsw) |
367 | { |
368 | int completed; |
	spin_lock(&nn->nn_lock);
	completed = list_empty(&nsw->ns_node_item);
	spin_unlock(&nn->nn_lock);
372 | return completed; |
373 | } |
374 | |
375 | /* ------------------------------------------------------------ */ |
376 | |
377 | static void sc_kref_release(struct kref *kref) |
378 | { |
379 | struct o2net_sock_container *sc = container_of(kref, |
380 | struct o2net_sock_container, sc_kref); |
381 | BUG_ON(timer_pending(&sc->sc_idle_timeout)); |
382 | |
	sclog(sc, "releasing\n");

	if (sc->sc_sock) {
		sock_release(sc->sc_sock);
		sc->sc_sock = NULL;
	}

	o2nm_undepend_item(&sc->sc_node->nd_item);
	o2nm_node_put(sc->sc_node);
	sc->sc_node = NULL;

	o2net_debug_del_sc(sc);

	if (sc->sc_page)
		__free_page(sc->sc_page);
	kfree(sc);
399 | } |
400 | |
401 | static void sc_put(struct o2net_sock_container *sc) |
402 | { |
	sclog(sc, "put\n");
	kref_put(&sc->sc_kref, sc_kref_release);
}
static void sc_get(struct o2net_sock_container *sc)
{
	sclog(sc, "get\n");
	kref_get(&sc->sc_kref);
410 | } |
411 | static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) |
412 | { |
413 | struct o2net_sock_container *sc, *ret = NULL; |
414 | struct page *page = NULL; |
415 | int status = 0; |
416 | |
417 | page = alloc_page(GFP_NOFS); |
	sc = kzalloc(sizeof(*sc), GFP_NOFS);
	if (sc == NULL || page == NULL)
		goto out;

	kref_init(&sc->sc_kref);
	o2nm_node_get(node);
	sc->sc_node = node;

	/* pin the node item of the remote node */
	status = o2nm_depend_item(&node->nd_item);
428 | if (status) { |
429 | mlog_errno(status); |
430 | o2nm_node_put(node); |
431 | goto out; |
432 | } |
433 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); |
434 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); |
435 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); |
436 | INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); |
437 | |
438 | timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0); |
439 | |
	sclog(sc, "alloced\n");
441 | |
442 | ret = sc; |
443 | sc->sc_page = page; |
444 | o2net_debug_add_sc(sc); |
445 | sc = NULL; |
446 | page = NULL; |
447 | |
448 | out: |
449 | if (page) |
450 | __free_page(page); |
	kfree(sc);
452 | |
453 | return ret; |
454 | } |
455 | |
456 | /* ------------------------------------------------------------ */ |
457 | |
458 | static void o2net_sc_queue_work(struct o2net_sock_container *sc, |
459 | struct work_struct *work) |
460 | { |
461 | sc_get(sc); |
	if (!queue_work(o2net_wq, work))
463 | sc_put(sc); |
464 | } |
465 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, |
466 | struct delayed_work *work, |
467 | int delay) |
468 | { |
469 | sc_get(sc); |
	if (!queue_delayed_work(o2net_wq, work, delay))
471 | sc_put(sc); |
472 | } |
473 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, |
474 | struct delayed_work *work) |
475 | { |
	if (cancel_delayed_work(work))
477 | sc_put(sc); |
478 | } |
479 | |
480 | static atomic_t o2net_connected_peers = ATOMIC_INIT(0); |
481 | |
482 | int o2net_num_connected_peers(void) |
483 | { |
	return atomic_read(&o2net_connected_peers);
485 | } |
486 | |
487 | static void o2net_set_nn_state(struct o2net_node *nn, |
488 | struct o2net_sock_container *sc, |
489 | unsigned valid, int err) |
490 | { |
491 | int was_valid = nn->nn_sc_valid; |
492 | int was_err = nn->nn_persistent_error; |
493 | struct o2net_sock_container *old_sc = nn->nn_sc; |
494 | |
495 | assert_spin_locked(&nn->nn_lock); |
496 | |
	if (old_sc && !sc)
		atomic_dec(&o2net_connected_peers);
	else if (!old_sc && sc)
		atomic_inc(&o2net_connected_peers);

	/* the node num comparison and single connect/accept path should stop
	 * a non-null sc from being overwritten with another */
	BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
	mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
	mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
507 | |
508 | if (was_valid && !valid && err == 0) |
509 | err = -ENOTCONN; |
510 | |
	mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
512 | o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid, |
513 | nn->nn_persistent_error, err); |
514 | |
515 | nn->nn_sc = sc; |
516 | nn->nn_sc_valid = valid ? 1 : 0; |
517 | nn->nn_persistent_error = err; |
518 | |
519 | /* mirrors o2net_tx_can_proceed() */ |
520 | if (nn->nn_persistent_error || nn->nn_sc_valid) |
521 | wake_up(&nn->nn_sc_wq); |
522 | |
523 | if (was_valid && !was_err && nn->nn_persistent_error) { |
		o2quo_conn_err(o2net_num_from_nn(nn));
		queue_delayed_work(o2net_wq, &nn->nn_still_up,
				   msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
527 | } |
528 | |
529 | if (was_valid && !valid) { |
530 | if (old_sc) |
531 | printk(KERN_NOTICE "o2net: No longer connected to " |
			       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
533 | o2net_complete_nodes_nsw(nn); |
534 | } |
535 | |
536 | if (!was_valid && valid) { |
		o2quo_conn_up(o2net_num_from_nn(nn));
		cancel_delayed_work(&nn->nn_connect_expired);
		printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
		       o2nm_this_node() > sc->sc_node->nd_num ?
		       "Connected to" : "Accepted connection from",
		       SC_NODEF_ARGS(sc));
543 | } |
544 | |
545 | /* trigger the connecting worker func as long as we're not valid, |
546 | * it will back off if it shouldn't connect. This can be called |
547 | * from node config teardown and so needs to be careful about |
548 | * the work queue actually being up. */ |
549 | if (!valid && o2net_wq) { |
550 | unsigned long delay; |
551 | /* delay if we're within a RECONNECT_DELAY of the |
552 | * last attempt */ |
		delay = (nn->nn_last_connect_attempt +
			 msecs_to_jiffies(o2net_reconnect_delay()))
			- jiffies;
		if (delay > msecs_to_jiffies(o2net_reconnect_delay()))
			delay = 0;
		mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
		queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
560 | |
561 | /* |
562 | * Delay the expired work after idle timeout. |
563 | * |
564 | * We might have lots of failed connection attempts that run |
565 | * through here but we only cancel the connect_expired work when |
566 | * a connection attempt succeeds. So only the first enqueue of |
567 | * the connect_expired work will do anything. The rest will see |
568 | * that it's already queued and do nothing. |
569 | */ |
		delay += msecs_to_jiffies(o2net_idle_timeout());
		queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay);
572 | } |
573 | |
574 | /* keep track of the nn's sc ref for the caller */ |
575 | if ((old_sc == NULL) && sc) |
576 | sc_get(sc); |
577 | if (old_sc && (old_sc != sc)) { |
		o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
		sc_put(old_sc);
580 | } |
581 | } |
582 | |
583 | /* see o2net_register_callbacks() */ |
584 | static void o2net_data_ready(struct sock *sk) |
585 | { |
586 | void (*ready)(struct sock *sk); |
587 | struct o2net_sock_container *sc; |
588 | |
589 | trace_sk_data_ready(sk); |
590 | |
591 | read_lock_bh(&sk->sk_callback_lock); |
592 | sc = sk->sk_user_data; |
593 | if (sc) { |
		sclog(sc, "data_ready hit\n");
		o2net_set_data_ready_time(sc);
		o2net_sc_queue_work(sc, &sc->sc_rx_work);
597 | ready = sc->sc_data_ready; |
598 | } else { |
599 | ready = sk->sk_data_ready; |
600 | } |
601 | read_unlock_bh(&sk->sk_callback_lock); |
602 | |
603 | ready(sk); |
604 | } |
605 | |
606 | /* see o2net_register_callbacks() */ |
607 | static void o2net_state_change(struct sock *sk) |
608 | { |
609 | void (*state_change)(struct sock *sk); |
610 | struct o2net_sock_container *sc; |
611 | |
612 | read_lock_bh(&sk->sk_callback_lock); |
613 | sc = sk->sk_user_data; |
614 | if (sc == NULL) { |
615 | state_change = sk->sk_state_change; |
616 | goto out; |
617 | } |
618 | |
	sclog(sc, "state_change to %d\n", sk->sk_state);
620 | |
621 | state_change = sc->sc_state_change; |
622 | |
623 | switch(sk->sk_state) { |
624 | /* ignore connecting sockets as they make progress */ |
625 | case TCP_SYN_SENT: |
626 | case TCP_SYN_RECV: |
627 | break; |
628 | case TCP_ESTABLISHED: |
		o2net_sc_queue_work(sc, &sc->sc_connect_work);
		break;
	default:
		printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
		       " shutdown, state %d\n",
		       SC_NODEF_ARGS(sc), sk->sk_state);
		o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
636 | break; |
637 | } |
638 | out: |
639 | read_unlock_bh(&sk->sk_callback_lock); |
640 | state_change(sk); |
641 | } |
642 | |
643 | /* |
644 | * we register callbacks so we can queue work on events before calling |
 * the original callbacks. our callbacks are careful to test user_data
 * to discover when they've raced with o2net_unregister_callbacks().
647 | */ |
648 | static void o2net_register_callbacks(struct sock *sk, |
649 | struct o2net_sock_container *sc) |
650 | { |
651 | write_lock_bh(&sk->sk_callback_lock); |
652 | |
653 | /* accepted sockets inherit the old listen socket data ready */ |
654 | if (sk->sk_data_ready == o2net_listen_data_ready) { |
655 | sk->sk_data_ready = sk->sk_user_data; |
656 | sk->sk_user_data = NULL; |
657 | } |
658 | |
659 | BUG_ON(sk->sk_user_data != NULL); |
660 | sk->sk_user_data = sc; |
661 | sc_get(sc); |
662 | |
663 | sc->sc_data_ready = sk->sk_data_ready; |
664 | sc->sc_state_change = sk->sk_state_change; |
665 | sk->sk_data_ready = o2net_data_ready; |
666 | sk->sk_state_change = o2net_state_change; |
667 | |
668 | mutex_init(&sc->sc_send_lock); |
669 | |
670 | write_unlock_bh(&sk->sk_callback_lock); |
671 | } |
672 | |
673 | static int o2net_unregister_callbacks(struct sock *sk, |
674 | struct o2net_sock_container *sc) |
675 | { |
676 | int ret = 0; |
677 | |
678 | write_lock_bh(&sk->sk_callback_lock); |
679 | if (sk->sk_user_data == sc) { |
680 | ret = 1; |
681 | sk->sk_user_data = NULL; |
682 | sk->sk_data_ready = sc->sc_data_ready; |
683 | sk->sk_state_change = sc->sc_state_change; |
684 | } |
685 | write_unlock_bh(&sk->sk_callback_lock); |
686 | |
687 | return ret; |
688 | } |
689 | |
690 | /* |
691 | * this is a little helper that is called by callers who have seen a problem |
 * with an sc and want to detach it from the nn if someone hasn't already
 * beaten them to it. if an error is given then the shutdown will be persistent
694 | * and pending transmits will be canceled. |
695 | */ |
696 | static void o2net_ensure_shutdown(struct o2net_node *nn, |
697 | struct o2net_sock_container *sc, |
698 | int err) |
699 | { |
	spin_lock(&nn->nn_lock);
	if (nn->nn_sc == sc)
		o2net_set_nn_state(nn, NULL, 0, err);
	spin_unlock(&nn->nn_lock);
704 | } |
705 | |
706 | /* |
707 | * This work queue function performs the blocking parts of socket shutdown. A |
708 | * few paths lead here. set_nn_state will trigger this callback if it sees an |
709 | * sc detached from the nn. state_change will also trigger this callback |
710 | * directly when it sees errors. In that case we need to call set_nn_state |
711 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state |
712 | * itself. |
713 | */ |
714 | static void o2net_shutdown_sc(struct work_struct *work) |
715 | { |
716 | struct o2net_sock_container *sc = |
717 | container_of(work, struct o2net_sock_container, |
718 | sc_shutdown_work); |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);

	sclog(sc, "shutting down\n");
722 | |
723 | /* drop the callbacks ref and call shutdown only once */ |
	if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
		/* we shouldn't flush as we're in the thread, the
		 * races with pending sc work structs are harmless */
		del_timer_sync(&sc->sc_idle_timeout);
		o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
		sc_put(sc);
		kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
	}

	/* not fatal so failed connects before the other guy has our
	 * heartbeat can be retried */
	o2net_ensure_shutdown(nn, sc, 0);
736 | sc_put(sc); |
737 | } |
738 | |
739 | /* ------------------------------------------------------------ */ |
740 | |
741 | static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type, |
742 | u32 key) |
743 | { |
	int ret = memcmp(&nmh->nh_key, &key, sizeof(key));

	if (ret == 0)
		ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
748 | |
749 | return ret; |
750 | } |
751 | |
752 | static struct o2net_msg_handler * |
753 | o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p, |
754 | struct rb_node **ret_parent) |
755 | { |
756 | struct rb_node **p = &o2net_handler_tree.rb_node; |
757 | struct rb_node *parent = NULL; |
758 | struct o2net_msg_handler *nmh, *ret = NULL; |
759 | int cmp; |
760 | |
761 | while (*p) { |
762 | parent = *p; |
763 | nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); |
764 | cmp = o2net_handler_cmp(nmh, msg_type, key); |
765 | |
766 | if (cmp < 0) |
767 | p = &(*p)->rb_left; |
768 | else if (cmp > 0) |
769 | p = &(*p)->rb_right; |
770 | else { |
771 | ret = nmh; |
772 | break; |
773 | } |
774 | } |
775 | |
776 | if (ret_p != NULL) |
777 | *ret_p = p; |
778 | if (ret_parent != NULL) |
779 | *ret_parent = parent; |
780 | |
781 | return ret; |
782 | } |
783 | |
784 | static void o2net_handler_kref_release(struct kref *kref) |
785 | { |
786 | struct o2net_msg_handler *nmh; |
787 | nmh = container_of(kref, struct o2net_msg_handler, nh_kref); |
788 | |
	kfree(nmh);
790 | } |
791 | |
792 | static void o2net_handler_put(struct o2net_msg_handler *nmh) |
793 | { |
	kref_put(&nmh->nh_kref, o2net_handler_kref_release);
795 | } |
796 | |
797 | /* max_len is protection for the handler func. incoming messages won't |
798 | * be given to the handler if their payload is longer than the max. */ |
799 | int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, |
800 | o2net_msg_handler_func *func, void *data, |
801 | o2net_post_msg_handler_func *post_func, |
802 | struct list_head *unreg_list) |
803 | { |
804 | struct o2net_msg_handler *nmh = NULL; |
805 | struct rb_node **p, *parent; |
806 | int ret = 0; |
807 | |
808 | if (max_len > O2NET_MAX_PAYLOAD_BYTES) { |
		mlog(0, "max_len for message handler out of range: %u\n",
810 | max_len); |
811 | ret = -EINVAL; |
812 | goto out; |
813 | } |
814 | |
815 | if (!msg_type) { |
		mlog(0, "no message type provided: %u, %p\n", msg_type, func);
817 | ret = -EINVAL; |
818 | goto out; |
819 | |
820 | } |
821 | if (!func) { |
		mlog(0, "no message handler provided: %u, %p\n",
823 | msg_type, func); |
824 | ret = -EINVAL; |
825 | goto out; |
826 | } |
827 | |
	nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
829 | if (nmh == NULL) { |
830 | ret = -ENOMEM; |
831 | goto out; |
832 | } |
833 | |
834 | nmh->nh_func = func; |
835 | nmh->nh_func_data = data; |
836 | nmh->nh_post_func = post_func; |
837 | nmh->nh_msg_type = msg_type; |
838 | nmh->nh_max_len = max_len; |
839 | nmh->nh_key = key; |
840 | /* the tree and list get this ref.. they're both removed in |
841 | * unregister when this ref is dropped */ |
	kref_init(&nmh->nh_kref);
	INIT_LIST_HEAD(&nmh->nh_unregister_item);
844 | |
	write_lock(&o2net_handler_lock);
	if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
		ret = -EEXIST;
	else {
		rb_link_node(&nmh->nh_node, parent, p);
		rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
		list_add_tail(&nmh->nh_unregister_item, unreg_list);

		mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
		     func, msg_type, key);
		/* we've had some trouble with handlers seemingly vanishing. */
		mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
							  &parent) == NULL,
				"couldn't find handler we *just* registered "
				"for type %u key %08x\n", msg_type, key);
	}
861 | write_unlock(&o2net_handler_lock); |
862 | |
863 | out: |
864 | if (ret) |
		kfree(nmh);
866 | |
867 | return ret; |
868 | } |
869 | EXPORT_SYMBOL_GPL(o2net_register_handler); |
870 | |
871 | void o2net_unregister_handler_list(struct list_head *list) |
872 | { |
873 | struct o2net_msg_handler *nmh, *n; |
874 | |
875 | write_lock(&o2net_handler_lock); |
876 | list_for_each_entry_safe(nmh, n, list, nh_unregister_item) { |
		mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
		     nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
		rb_erase(&nmh->nh_node, &o2net_handler_tree);
		list_del_init(&nmh->nh_unregister_item);
		kref_put(&nmh->nh_kref, o2net_handler_kref_release);
882 | } |
883 | write_unlock(&o2net_handler_lock); |
884 | } |
885 | EXPORT_SYMBOL_GPL(o2net_unregister_handler_list); |
886 | |
887 | static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key) |
888 | { |
889 | struct o2net_msg_handler *nmh; |
890 | |
891 | read_lock(&o2net_handler_lock); |
892 | nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL); |
893 | if (nmh) |
		kref_get(&nmh->nh_kref);
895 | read_unlock(&o2net_handler_lock); |
896 | |
897 | return nmh; |
898 | } |
899 | |
900 | /* ------------------------------------------------------------ */ |
901 | |
902 | static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len) |
903 | { |
904 | struct kvec vec = { .iov_len = len, .iov_base = data, }; |
905 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; |
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, len);
	return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
908 | } |
909 | |
910 | static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec, |
911 | size_t veclen, size_t total) |
912 | { |
913 | int ret; |
914 | struct msghdr msg = {.msg_flags = 0,}; |
915 | |
916 | if (sock == NULL) { |
917 | ret = -EINVAL; |
918 | goto out; |
919 | } |
920 | |
	ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
	if (likely(ret == total))
		return 0;
	mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
	if (ret >= 0)
		ret = -EPIPE; /* should be smarter, I bet */
out:
	mlog(0, "returning error: %d\n", ret);
929 | return ret; |
930 | } |
931 | |
932 | static void o2net_sendpage(struct o2net_sock_container *sc, |
933 | void *virt, size_t size) |
934 | { |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
936 | struct msghdr msg = {}; |
937 | struct bio_vec bv; |
938 | ssize_t ret; |
939 | |
	bvec_set_virt(&bv, virt, size);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, size);

	while (1) {
		msg.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES;
		mutex_lock(&sc->sc_send_lock);
		ret = sock_sendmsg(sc->sc_sock, &msg);
		mutex_unlock(&sc->sc_send_lock);
948 | |
949 | if (ret == size) |
950 | break; |
		if (ret == (ssize_t)-EAGAIN) {
			mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
			     " returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
			cond_resched();
			continue;
		}
		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
		     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
		o2net_ensure_shutdown(nn, sc, 0);
960 | break; |
961 | } |
962 | } |
963 | |
964 | static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key) |
965 | { |
966 | memset(msg, 0, sizeof(struct o2net_msg)); |
967 | msg->magic = cpu_to_be16(O2NET_MSG_MAGIC); |
968 | msg->data_len = cpu_to_be16(data_len); |
969 | msg->msg_type = cpu_to_be16(msg_type); |
970 | msg->sys_status = cpu_to_be32(O2NET_ERR_NONE); |
971 | msg->status = 0; |
972 | msg->key = cpu_to_be32(key); |
973 | } |
974 | |
975 | static int o2net_tx_can_proceed(struct o2net_node *nn, |
976 | struct o2net_sock_container **sc_ret, |
977 | int *error) |
978 | { |
979 | int ret = 0; |
980 | |
	spin_lock(&nn->nn_lock);
	if (nn->nn_persistent_error) {
		ret = 1;
		*sc_ret = NULL;
		*error = nn->nn_persistent_error;
	} else if (nn->nn_sc_valid) {
		kref_get(&nn->nn_sc->sc_kref);

		ret = 1;
		*sc_ret = nn->nn_sc;
		*error = 0;
	}
	spin_unlock(&nn->nn_lock);
994 | |
995 | return ret; |
996 | } |
997 | |
/* Get a map of all nodes to which this node is currently connected */
999 | void o2net_fill_node_map(unsigned long *map, unsigned int bits) |
1000 | { |
1001 | struct o2net_sock_container *sc; |
1002 | int node, ret; |
1003 | |
	bitmap_zero(map, bits);
	for (node = 0; node < O2NM_MAX_NODES; ++node) {
		if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
			continue;
		if (!ret) {
			set_bit(node, map);
			sc_put(sc);
1011 | } |
1012 | } |
1013 | } |
1014 | EXPORT_SYMBOL_GPL(o2net_fill_node_map); |
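
/*
 * Minimal usage sketch for o2net_fill_node_map() (illustrative only;
 * 'conn_map', 'other_node' and 'connected' are hypothetical caller names):
 *
 *	unsigned long conn_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *	o2net_fill_node_map(conn_map, O2NM_MAX_NODES);
 *	connected = test_bit(other_node, conn_map);
 *
 * The result is only a snapshot; a connection can drop as soon as nn_lock is
 * released inside o2net_tx_can_proceed().
 */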
1015 | |
1016 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
1017 | size_t caller_veclen, u8 target_node, int *status) |
1018 | { |
1019 | int ret = 0; |
1020 | struct o2net_msg *msg = NULL; |
1021 | size_t veclen, caller_bytes = 0; |
1022 | struct kvec *vec = NULL; |
1023 | struct o2net_sock_container *sc = NULL; |
	struct o2net_node *nn = o2net_nn_from_num(target_node);
	struct o2net_status_wait nsw = {
		.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
	};
	struct o2net_send_tracking nst;

	o2net_init_nst(&nst, msg_type, key, current, target_node);
1031 | |
1032 | if (o2net_wq == NULL) { |
		mlog(0, "attempt to tx without o2netd running\n");
1034 | ret = -ESRCH; |
1035 | goto out; |
1036 | } |
1037 | |
1038 | if (caller_veclen == 0) { |
		mlog(0, "bad kvec array length\n");
1040 | ret = -EINVAL; |
1041 | goto out; |
1042 | } |
1043 | |
	caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
	if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
		mlog(0, "total payload len %zu too large\n", caller_bytes);
1047 | ret = -EINVAL; |
1048 | goto out; |
1049 | } |
1050 | |
1051 | if (target_node == o2nm_this_node()) { |
1052 | ret = -ELOOP; |
1053 | goto out; |
1054 | } |
1055 | |
	o2net_debug_add_nst(&nst);

	o2net_set_nst_sock_time(&nst);

	wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret));
	if (ret)
		goto out;

	o2net_set_nst_sock_container(&nst, sc);

	veclen = caller_veclen + 1;
	vec = kmalloc_array(veclen, sizeof(struct kvec), GFP_ATOMIC);
	if (vec == NULL) {
		mlog(0, "failed to %zu element kvec!\n", veclen);
1070 | ret = -ENOMEM; |
1071 | goto out; |
1072 | } |
1073 | |
	msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
	if (!msg) {
		mlog(0, "failed to allocate a o2net_msg!\n");
1077 | ret = -ENOMEM; |
1078 | goto out; |
1079 | } |
1080 | |
	o2net_init_msg(msg, caller_bytes, msg_type, key);
1082 | |
1083 | vec[0].iov_len = sizeof(struct o2net_msg); |
1084 | vec[0].iov_base = msg; |
1085 | memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec)); |
1086 | |
	ret = o2net_prep_nsw(nn, &nsw);
1088 | if (ret) |
1089 | goto out; |
1090 | |
1091 | msg->msg_num = cpu_to_be32(nsw.ns_id); |
	o2net_set_nst_msg_id(&nst, nsw.ns_id);

	o2net_set_nst_send_time(&nst);
1095 | |
1096 | /* finally, convert the message header to network byte-order |
1097 | * and send */ |
	mutex_lock(&sc->sc_send_lock);
	ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
				 sizeof(struct o2net_msg) + caller_bytes);
	mutex_unlock(&sc->sc_send_lock);
	msglog(msg, "sending returned %d\n", ret);
	if (ret < 0) {
		mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
1105 | goto out; |
1106 | } |
1107 | |
1108 | /* wait on other node's handler */ |
	o2net_set_nst_status_time(&nst);
	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));

	o2net_update_send_stats(&nst, sc);

	/* Note that we avoid overwriting the caller's status return
	 * variable if a system error was reported on the other
	 * side. Callers beware. */
	ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
1118 | if (status && !ret) |
1119 | *status = nsw.ns_status; |
1120 | |
	mlog(0, "woken, returning system status %d, user status %d\n",
	     ret, nsw.ns_status);
out:
	o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
	if (sc)
		sc_put(sc);
	kfree(vec);
	kfree(msg);
	o2net_complete_nsw(nn, &nsw, 0, 0, 0);
1130 | return ret; |
1131 | } |
1132 | EXPORT_SYMBOL_GPL(o2net_send_message_vec); |
1133 | |
1134 | int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len, |
1135 | u8 target_node, int *status) |
1136 | { |
1137 | struct kvec vec = { |
1138 | .iov_base = data, |
1139 | .iov_len = len, |
1140 | }; |
1141 | return o2net_send_message_vec(msg_type, key, &vec, 1, |
1142 | target_node, status); |
1143 | } |
1144 | EXPORT_SYMBOL_GPL(o2net_send_message); |
1145 | |
1146 | static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr, |
1147 | enum o2net_system_error syserr, int err) |
1148 | { |
1149 | struct kvec vec = { |
1150 | .iov_base = hdr, |
1151 | .iov_len = sizeof(struct o2net_msg), |
1152 | }; |
1153 | |
1154 | BUG_ON(syserr >= O2NET_ERR_MAX); |
1155 | |
1156 | /* leave other fields intact from the incoming message, msg_num |
1157 | * in particular */ |
1158 | hdr->sys_status = cpu_to_be32(syserr); |
1159 | hdr->status = cpu_to_be32(err); |
1160 | hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); // twiddle the magic |
1161 | hdr->data_len = 0; |
1162 | |
	msglog(hdr, "about to send status magic %d\n", err);
	/* hdr has been in host byteorder this whole time */
	return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
1166 | } |
1167 | |
1168 | /* this returns -errno if the header was unknown or too large, etc. |
 * after this is called the buffer is reused for the next message */
1170 | static int o2net_process_message(struct o2net_sock_container *sc, |
1171 | struct o2net_msg *hdr) |
1172 | { |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1174 | int ret = 0, handler_status; |
1175 | enum o2net_system_error syserr; |
1176 | struct o2net_msg_handler *nmh = NULL; |
1177 | void *ret_data = NULL; |
1178 | |
	msglog(hdr, "processing message\n");
1180 | |
1181 | o2net_sc_postpone_idle(sc); |
1182 | |
1183 | switch(be16_to_cpu(hdr->magic)) { |
1184 | case O2NET_MSG_STATUS_MAGIC: |
1185 | /* special type for returning message status */ |
1186 | o2net_complete_nsw(nn, NULL, |
1187 | be32_to_cpu(hdr->msg_num), |
1188 | be32_to_cpu(hdr->sys_status), |
1189 | be32_to_cpu(hdr->status)); |
1190 | goto out; |
1191 | case O2NET_MSG_KEEP_REQ_MAGIC: |
		o2net_sendpage(sc, o2net_keep_resp,
			       sizeof(*o2net_keep_resp));
1194 | goto out; |
1195 | case O2NET_MSG_KEEP_RESP_MAGIC: |
1196 | goto out; |
1197 | case O2NET_MSG_MAGIC: |
1198 | break; |
1199 | default: |
		msglog(hdr, "bad magic\n");
1201 | ret = -EINVAL; |
1202 | goto out; |
1203 | } |
1204 | |
1205 | /* find a handler for it */ |
1206 | handler_status = 0; |
1207 | nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type), |
1208 | be32_to_cpu(hdr->key)); |
1209 | if (!nmh) { |
		mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
1211 | be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key)); |
1212 | syserr = O2NET_ERR_NO_HNDLR; |
1213 | goto out_respond; |
1214 | } |
1215 | |
1216 | syserr = O2NET_ERR_NONE; |
1217 | |
1218 | if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len) |
1219 | syserr = O2NET_ERR_OVERFLOW; |
1220 | |
1221 | if (syserr != O2NET_ERR_NONE) |
1222 | goto out_respond; |
1223 | |
1224 | o2net_set_func_start_time(sc); |
1225 | sc->sc_msg_key = be32_to_cpu(hdr->key); |
1226 | sc->sc_msg_type = be16_to_cpu(hdr->msg_type); |
1227 | handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + |
1228 | be16_to_cpu(hdr->data_len), |
1229 | nmh->nh_func_data, &ret_data); |
1230 | o2net_set_func_stop_time(sc); |
1231 | |
1232 | o2net_update_recv_stats(sc); |
1233 | |
1234 | out_respond: |
1235 | /* this destroys the hdr, so don't use it after this */ |
	mutex_lock(&sc->sc_send_lock);
	ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
				      handler_status);
	mutex_unlock(&sc->sc_send_lock);
	hdr = NULL;
	mlog(0, "sending handler status %d, syserr %d returned %d\n",
1242 | handler_status, syserr, ret); |
1243 | |
1244 | if (nmh) { |
1245 | BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL); |
1246 | if (nmh->nh_post_func) |
1247 | (nmh->nh_post_func)(handler_status, nmh->nh_func_data, |
1248 | ret_data); |
1249 | } |
1250 | |
1251 | out: |
1252 | if (nmh) |
1253 | o2net_handler_put(nmh); |
1254 | return ret; |
1255 | } |
1256 | |
1257 | static int o2net_check_handshake(struct o2net_sock_container *sc) |
1258 | { |
1259 | struct o2net_handshake *hand = page_address(sc->sc_page); |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1261 | |
1262 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { |
		printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
		       "protocol version %llu but %llu is required. "
		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
		       (unsigned long long)be64_to_cpu(hand->protocol_version),
		       O2NET_PROTOCOL_VERSION);

		/* don't bother reconnecting if it's the wrong version. */
		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1271 | return -1; |
1272 | } |
1273 | |
1274 | /* |
1275 | * Ensure timeouts are consistent with other nodes, otherwise |
1276 | * we can end up with one node thinking that the other must be down, |
1277 | * but isn't. This can ultimately cause corruption. |
1278 | */ |
1279 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != |
1280 | o2net_idle_timeout()) { |
1281 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network " |
1282 | "idle timeout of %u ms, but we use %u ms locally. " |
		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
		       be32_to_cpu(hand->o2net_idle_timeout_ms),
		       o2net_idle_timeout());
		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1287 | return -1; |
1288 | } |
1289 | |
1290 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != |
1291 | o2net_keepalive_delay()) { |
1292 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive " |
1293 | "delay of %u ms, but we use %u ms locally. " |
		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
		       be32_to_cpu(hand->o2net_keepalive_delay_ms),
		       o2net_keepalive_delay());
		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1298 | return -1; |
1299 | } |
1300 | |
1301 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != |
1302 | O2HB_MAX_WRITE_TIMEOUT_MS) { |
1303 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat " |
1304 | "timeout of %u ms, but we use %u ms locally. " |
		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
		       be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
		       O2HB_MAX_WRITE_TIMEOUT_MS);
		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1309 | return -1; |
1310 | } |
1311 | |
1312 | sc->sc_handshake_ok = 1; |
1313 | |
	spin_lock(&nn->nn_lock);
	/* set valid and queue the idle timers only if it hasn't been
	 * shut down already */
	if (nn->nn_sc == sc) {
		o2net_sc_reset_idle_timer(sc);
		atomic_set(&nn->nn_timeout, 0);
		o2net_set_nn_state(nn, sc, 1, 0);
	}
	spin_unlock(&nn->nn_lock);
1323 | |
1324 | /* shift everything up as though it wasn't there */ |
1325 | sc->sc_page_off -= sizeof(struct o2net_handshake); |
1326 | if (sc->sc_page_off) |
1327 | memmove(hand, hand + 1, sc->sc_page_off); |
1328 | |
1329 | return 0; |
1330 | } |
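
/*
 * A rough picture of the receive framing over sc->sc_page (illustrative;
 * sc_page_off counts how much of the current unit has been copied in so far):
 *
 *	new socket:        [ struct o2net_handshake ]
 *	each message:      [ struct o2net_msg hdr ][ data_len payload bytes ]
 *
 * The handshake bytes are shifted out by o2net_check_handshake(); after a
 * full message is handed to o2net_process_message(), sc_page_off is reset
 * to 0 and the page is reused for the next header.
 */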
1331 | |
1332 | /* this demuxes the queued rx bytes into header or payload bits and calls |
1333 | * handlers as each full message is read off the socket. it returns -error, |
1334 | * == 0 eof, or > 0 for progress made.*/ |
1335 | static int o2net_advance_rx(struct o2net_sock_container *sc) |
1336 | { |
1337 | struct o2net_msg *hdr; |
1338 | int ret = 0; |
1339 | void *data; |
1340 | size_t datalen; |
1341 | |
	sclog(sc, "receiving\n");
1343 | o2net_set_advance_start_time(sc); |
1344 | |
1345 | if (unlikely(sc->sc_handshake_ok == 0)) { |
		if (sc->sc_page_off < sizeof(struct o2net_handshake)) {
			data = page_address(sc->sc_page) + sc->sc_page_off;
			datalen = sizeof(struct o2net_handshake) - sc->sc_page_off;
			ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1350 | if (ret > 0) |
1351 | sc->sc_page_off += ret; |
1352 | } |
1353 | |
1354 | if (sc->sc_page_off == sizeof(struct o2net_handshake)) { |
1355 | o2net_check_handshake(sc); |
1356 | if (unlikely(sc->sc_handshake_ok == 0)) |
1357 | ret = -EPROTO; |
1358 | } |
1359 | goto out; |
1360 | } |
1361 | |
1362 | /* do we need more header? */ |
1363 | if (sc->sc_page_off < sizeof(struct o2net_msg)) { |
1364 | data = page_address(sc->sc_page) + sc->sc_page_off; |
1365 | datalen = sizeof(struct o2net_msg) - sc->sc_page_off; |
		ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1367 | if (ret > 0) { |
1368 | sc->sc_page_off += ret; |
1369 | /* only swab incoming here.. we can |
1370 | * only get here once as we cross from |
1371 | * being under to over */ |
1372 | if (sc->sc_page_off == sizeof(struct o2net_msg)) { |
1373 | hdr = page_address(sc->sc_page); |
1374 | if (be16_to_cpu(hdr->data_len) > |
1375 | O2NET_MAX_PAYLOAD_BYTES) |
1376 | ret = -EOVERFLOW; |
1377 | } |
1378 | } |
1379 | if (ret <= 0) |
1380 | goto out; |
1381 | } |
1382 | |
1383 | if (sc->sc_page_off < sizeof(struct o2net_msg)) { |
1384 | /* oof, still don't have a header */ |
1385 | goto out; |
1386 | } |
1387 | |
1388 | /* this was swabbed above when we first read it */ |
1389 | hdr = page_address(sc->sc_page); |
1390 | |
	msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
1392 | |
1393 | /* do we need more payload? */ |
1394 | if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) { |
1395 | /* need more payload */ |
1396 | data = page_address(sc->sc_page) + sc->sc_page_off; |
1397 | datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) - |
1398 | sc->sc_page_off; |
		ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1400 | if (ret > 0) |
1401 | sc->sc_page_off += ret; |
1402 | if (ret <= 0) |
1403 | goto out; |
1404 | } |
1405 | |
1406 | if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) { |
1407 | /* we can only get here once, the first time we read |
1408 | * the payload.. so set ret to progress if the handler |
1409 | * works out. after calling this the message is toast */ |
1410 | ret = o2net_process_message(sc, hdr); |
1411 | if (ret == 0) |
1412 | ret = 1; |
1413 | sc->sc_page_off = 0; |
1414 | } |
1415 | |
1416 | out: |
	sclog(sc, "ret = %d\n", ret);
1418 | o2net_set_advance_stop_time(sc); |
1419 | return ret; |
1420 | } |
1421 | |
/* this work func is triggered by data ready. it reads until it can read no
1423 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing |
1424 | * our work the work struct will be marked and we'll be called again. */ |
1425 | static void o2net_rx_until_empty(struct work_struct *work) |
1426 | { |
1427 | struct o2net_sock_container *sc = |
1428 | container_of(work, struct o2net_sock_container, sc_rx_work); |
1429 | int ret; |
1430 | |
1431 | do { |
1432 | ret = o2net_advance_rx(sc); |
1433 | } while (ret > 0); |
1434 | |
1435 | if (ret <= 0 && ret != -EAGAIN) { |
		struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
		sclog(sc, "saw error %d, closing\n", ret);
		/* not permanent so read failed handshake can retry */
		o2net_ensure_shutdown(nn, sc, 0);
1440 | } |
1441 | |
1442 | sc_put(sc); |
1443 | } |
1444 | |
1445 | static void o2net_initialize_handshake(void) |
1446 | { |
1447 | o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32( |
1448 | O2HB_MAX_WRITE_TIMEOUT_MS); |
1449 | o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout()); |
1450 | o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32( |
1451 | o2net_keepalive_delay()); |
1452 | o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32( |
1453 | o2net_reconnect_delay()); |
1454 | } |
1455 | |
1456 | /* ------------------------------------------------------------ */ |
1457 | |
1458 | /* called when a connect completes and after a sock is accepted. the |
1459 | * rx path will see the response and mark the sc valid */ |
1460 | static void o2net_sc_connect_completed(struct work_struct *work) |
1461 | { |
1462 | struct o2net_sock_container *sc = |
1463 | container_of(work, struct o2net_sock_container, |
1464 | sc_connect_work); |
1465 | |
	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1467 | (unsigned long long)O2NET_PROTOCOL_VERSION, |
1468 | (unsigned long long)be64_to_cpu(o2net_hand->connector_id)); |
1469 | |
1470 | o2net_initialize_handshake(); |
	o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1472 | sc_put(sc); |
1473 | } |
1474 | |
1475 | /* this is called as a work_struct func. */ |
1476 | static void o2net_sc_send_keep_req(struct work_struct *work) |
1477 | { |
1478 | struct o2net_sock_container *sc = |
1479 | container_of(work, struct o2net_sock_container, |
1480 | sc_keepalive_work.work); |
1481 | |
	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
1483 | sc_put(sc); |
1484 | } |
1485 | |
1486 | /* socket shutdown does a del_timer_sync against this as it tears down. |
1487 | * we can't start this timer until we've got to the point in sc buildup |
1488 | * where shutdown is going to be involved */ |
1489 | static void o2net_idle_timer(struct timer_list *t) |
1490 | { |
1491 | struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout); |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
	unsigned long msecs = ktime_to_ms(ktime_get()) -
		ktime_to_ms(sc->sc_tv_timer);
1496 | #else |
1497 | unsigned long msecs = o2net_idle_timeout(); |
1498 | #endif |
1499 | |
	printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
	       "idle for %lu.%lu secs.\n",
	       SC_NODEF_ARGS(sc), msecs / 1000, msecs % 1000);

	/* idle timeout happened; don't shut down the connection, but
	 * make the fence decision. Maybe the connection can recover before
	 * the decision is made.
	 */
	atomic_set(&nn->nn_timeout, 1);
	o2quo_conn_err(o2net_num_from_nn(nn));
	queue_delayed_work(o2net_wq, &nn->nn_still_up,
			   msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
1512 | |
1513 | o2net_sc_reset_idle_timer(sc); |
1514 | |
1515 | } |
1516 | |
1517 | static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) |
1518 | { |
	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
				    msecs_to_jiffies(o2net_keepalive_delay()));
	o2net_set_sock_timer(sc);
	mod_timer(&sc->sc_idle_timeout,
		  jiffies + msecs_to_jiffies(o2net_idle_timeout()));
1525 | } |
1526 | |
1527 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) |
1528 | { |
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);

	/* clear the fence decision since the connection recovered from timeout */
	if (atomic_read(&nn->nn_timeout)) {
		o2quo_conn_up(o2net_num_from_nn(nn));
		cancel_delayed_work(&nn->nn_still_up);
		atomic_set(&nn->nn_timeout, 0);
	}

	/* Only push out an existing timer */
	if (timer_pending(&sc->sc_idle_timeout))
1540 | o2net_sc_reset_idle_timer(sc); |
1541 | } |
1542 | |
1543 | /* this work func is kicked whenever a path sets the nn state which doesn't |
1544 | * have valid set. This includes seeing hb come up, losing a connection, |
1545 | * having a connect attempt fail, etc. This centralizes the logic which decides |
1546 | * if a connect attempt should be made or if we should give up and all future |
1547 | * transmit attempts should fail */ |
1548 | static void o2net_start_connect(struct work_struct *work) |
1549 | { |
1550 | struct o2net_node *nn = |
1551 | container_of(work, struct o2net_node, nn_connect_work.work); |
1552 | struct o2net_sock_container *sc = NULL; |
1553 | struct o2nm_node *node = NULL, *mynode = NULL; |
1554 | struct socket *sock = NULL; |
1555 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; |
1556 | int ret = 0, stop; |
1557 | unsigned int timeout; |
1558 | unsigned int nofs_flag; |
1559 | |
1560 | /* |
1561 | * sock_create allocates the sock with GFP_KERNEL. We must |
1562 | * prevent the filesystem from being reentered by memory reclaim. |
1563 | */ |
1564 | nofs_flag = memalloc_nofs_save(); |
1565 | /* if we're greater we initiate tx, otherwise we accept */ |
1566 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) |
1567 | goto out; |
1568 | |
1569 | /* watch for racing with tearing a node down */ |
	node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
	if (node == NULL)
		goto out;

	mynode = o2nm_get_node_by_num(o2nm_this_node());
1575 | if (mynode == NULL) |
1576 | goto out; |
1577 | |
	spin_lock(&nn->nn_lock);
1579 | /* |
1580 | * see if we already have one pending or have given up. |
	 * For nn_timeout, it is set when we close the connection
	 * because of the idle timeout, so it means we have connected
	 * to that node successfully at least once; now try to
	 * connect to it again.
1585 | */ |
	timeout = atomic_read(&nn->nn_timeout);
1587 | stop = (nn->nn_sc || |
1588 | (nn->nn_persistent_error && |
1589 | (nn->nn_persistent_error != -ENOTCONN || timeout == 0))); |
	spin_unlock(&nn->nn_lock);
1591 | if (stop) |
1592 | goto out; |
1593 | |
1594 | nn->nn_last_connect_attempt = jiffies; |
1595 | |
1596 | sc = sc_alloc(node); |
1597 | if (sc == NULL) { |
1598 | mlog(0, "couldn't allocate sc\n" ); |
1599 | ret = -ENOMEM; |
1600 | goto out; |
1601 | } |
1602 | |
	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0) {
		mlog(0, "can't create socket: %d\n", ret);
1606 | goto out; |
1607 | } |
1608 | sc->sc_sock = sock; /* freed by sc_kref_release */ |
1609 | |
1610 | sock->sk->sk_allocation = GFP_ATOMIC; |
1611 | sock->sk->sk_use_task_frag = false; |
1612 | |
1613 | myaddr.sin_family = AF_INET; |
1614 | myaddr.sin_addr.s_addr = mynode->nd_ipv4_address; |
1615 | myaddr.sin_port = htons(0); /* any port */ |
1616 | |
1617 | ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr, |
1618 | sizeof(myaddr)); |
1619 | if (ret) { |
1620 | mlog(ML_ERROR, "bind failed with %d at address %pI4\n" , |
1621 | ret, &mynode->nd_ipv4_address); |
1622 | goto out; |
1623 | } |
1624 | |
	tcp_sock_set_nodelay(sc->sc_sock->sk);
	tcp_sock_set_user_timeout(sock->sk, O2NET_TCP_USER_TIMEOUT);
1627 | |
	o2net_register_callbacks(sc->sc_sock->sk, sc);
1629 | |
	spin_lock(&nn->nn_lock);
	/* handshake completion will set nn->nn_sc_valid */
	o2net_set_nn_state(nn, sc, 0, 0);
	spin_unlock(&nn->nn_lock);
1634 | |
1635 | remoteaddr.sin_family = AF_INET; |
1636 | remoteaddr.sin_addr.s_addr = node->nd_ipv4_address; |
1637 | remoteaddr.sin_port = node->nd_ipv4_port; |
1638 | |
1639 | ret = sc->sc_sock->ops->connect(sc->sc_sock, |
1640 | (struct sockaddr *)&remoteaddr, |
1641 | sizeof(remoteaddr), |
1642 | O_NONBLOCK); |
1643 | if (ret == -EINPROGRESS) |
1644 | ret = 0; |
1645 | |
1646 | out: |
1647 | if (ret && sc) { |
1648 | printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT |
1649 | " failed with errno %d\n" , SC_NODEF_ARGS(sc), ret); |
1650 | /* 0 err so that another will be queued and attempted |
1651 | * from set_nn_state */ |
		o2net_ensure_shutdown(nn, sc, 0);
1653 | } |
1654 | if (sc) |
1655 | sc_put(sc); |
1656 | if (node) |
1657 | o2nm_node_put(node); |
1658 | if (mynode) |
		o2nm_node_put(mynode);
1660 | |
	memalloc_nofs_restore(nofs_flag);
1662 | return; |
1663 | } |
1664 | |
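/* fires when no connection has been established with the node within the
 * idle timeout; resets the nn state under nn_lock so the connect logic can
 * decide what to do next */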
1665 | static void o2net_connect_expired(struct work_struct *work) |
1666 | { |
1667 | struct o2net_node *nn = |
1668 | container_of(work, struct o2net_node, nn_connect_expired.work); |
1669 | |
	spin_lock(&nn->nn_lock);
1671 | if (!nn->nn_sc_valid) { |
1672 | printk(KERN_NOTICE "o2net: No connection established with " |
1673 | "node %u after %u.%u seconds, check network and" |
1674 | " cluster configuration.\n" , |
1675 | o2net_num_from_nn(nn), |
1676 | o2net_idle_timeout() / 1000, |
1677 | o2net_idle_timeout() % 1000); |
1678 | |
		o2net_set_nn_state(nn, NULL, 0, 0);
1680 | } |
	spin_unlock(&nn->nn_lock);
1682 | } |
1683 | |
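/* queued O2NET_QUORUM_DELAY_MS after an idle timeout to remind the quorum
 * code that this node is still up */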
1684 | static void o2net_still_up(struct work_struct *work) |
1685 | { |
1686 | struct o2net_node *nn = |
1687 | container_of(work, struct o2net_node, nn_still_up.work); |
1688 | |
	o2quo_hb_still_up(o2net_num_from_nn(nn));
1690 | } |
1691 | |
1692 | /* ------------------------------------------------------------ */ |
1693 | |
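/* tear down the connection to a node and cancel any of its pending
 * connect, expiry, or quorum work; it won't be reconnected until it is
 * seen heartbeating again */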
1694 | void o2net_disconnect_node(struct o2nm_node *node) |
1695 | { |
	struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
1697 | |
1698 | /* don't reconnect until it's heartbeating again */ |
	spin_lock(&nn->nn_lock);
	atomic_set(&nn->nn_timeout, 0);
	o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
	spin_unlock(&nn->nn_lock);
1703 | |
1704 | if (o2net_wq) { |
1705 | cancel_delayed_work(dwork: &nn->nn_connect_expired); |
1706 | cancel_delayed_work(dwork: &nn->nn_connect_work); |
1707 | cancel_delayed_work(dwork: &nn->nn_still_up); |
1708 | flush_workqueue(o2net_wq); |
1709 | } |
1710 | } |
1711 | |
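/* heartbeat callback: a node has stopped heartbeating, so tell the quorum
 * code and drop our connection to it (unless the node is ourselves) */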
1712 | static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num, |
1713 | void *data) |
1714 | { |
	o2quo_hb_down(node_num);
1716 | |
1717 | if (!node) |
1718 | return; |
1719 | |
1720 | if (node_num != o2nm_this_node()) |
1721 | o2net_disconnect_node(node); |
1722 | |
1723 | BUG_ON(atomic_read(&o2net_connected_peers) < 0); |
1724 | } |
1725 | |
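/* heartbeat callback: a node has started heartbeating, so tell the quorum
 * code and arrange for an immediate connect attempt if we'd previously
 * given up with a persistent error */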
1726 | static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num, |
1727 | void *data) |
1728 | { |
1729 | struct o2net_node *nn = o2net_nn_from_num(node_num); |
1730 | |
	o2quo_hb_up(node_num);
1732 | |
1733 | BUG_ON(!node); |
1734 | |
1735 | /* ensure an immediate connect attempt */ |
1736 | nn->nn_last_connect_attempt = jiffies - |
		(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
1738 | |
1739 | if (node_num != o2nm_this_node()) { |
		/* believe it or not, accept and the node heartbeat check can
		 * succeed for this node before we get here, so only use
		 * set_nn_state to clear the persistent error if that hasn't
		 * already happened */
		spin_lock(&nn->nn_lock);
		atomic_set(&nn->nn_timeout, 0);
		if (nn->nn_persistent_error)
			o2net_set_nn_state(nn, NULL, 0, 0);
		spin_unlock(&nn->nn_lock);
1749 | } |
1750 | } |
1751 | |
1752 | void o2net_unregister_hb_callbacks(void) |
1753 | { |
1754 | o2hb_unregister_callback(NULL, hc: &o2net_hb_up); |
1755 | o2hb_unregister_callback(NULL, hc: &o2net_hb_down); |
1756 | } |
1757 | |
1758 | int o2net_register_hb_callbacks(void) |
1759 | { |
1760 | int ret; |
1761 | |
1762 | o2hb_setup_callback(hc: &o2net_hb_down, type: O2HB_NODE_DOWN_CB, |
1763 | func: o2net_hb_node_down_cb, NULL, O2NET_HB_PRI); |
1764 | o2hb_setup_callback(hc: &o2net_hb_up, type: O2HB_NODE_UP_CB, |
1765 | func: o2net_hb_node_up_cb, NULL, O2NET_HB_PRI); |
1766 | |
	ret = o2hb_register_callback(NULL, &o2net_hb_up);
	if (ret == 0)
		ret = o2hb_register_callback(NULL, &o2net_hb_down);
1770 | |
1771 | if (ret) |
1772 | o2net_unregister_hb_callbacks(); |
1773 | |
1774 | return ret; |
1775 | } |
1776 | |
1777 | /* ------------------------------------------------------------ */ |
1778 | |
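/* accept a single pending connection on the listening socket and, if it
 * comes from a known, heartbeating node with a higher node number than
 * ours that doesn't already have a connection, attach it to that node's
 * o2net_node; *more is set if another accept may be pending */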
1779 | static int o2net_accept_one(struct socket *sock, int *more) |
1780 | { |
1781 | int ret; |
1782 | struct sockaddr_in sin; |
1783 | struct socket *new_sock = NULL; |
1784 | struct o2nm_node *node = NULL; |
1785 | struct o2nm_node *local_node = NULL; |
1786 | struct o2net_sock_container *sc = NULL; |
1787 | struct o2net_node *nn; |
1788 | unsigned int nofs_flag; |
1789 | |
1790 | /* |
1791 | * sock_create_lite allocates the sock with GFP_KERNEL. We must |
1792 | * prevent the filesystem from being reentered by memory reclaim. |
1793 | */ |
1794 | nofs_flag = memalloc_nofs_save(); |
1795 | |
1796 | BUG_ON(sock == NULL); |
1797 | *more = 0; |
	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
			       sock->sk->sk_protocol, &new_sock);
1800 | if (ret) |
1801 | goto out; |
1802 | |
1803 | new_sock->type = sock->type; |
1804 | new_sock->ops = sock->ops; |
1805 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false); |
1806 | if (ret < 0) |
1807 | goto out; |
1808 | |
1809 | *more = 1; |
1810 | new_sock->sk->sk_allocation = GFP_ATOMIC; |
1811 | |
	tcp_sock_set_nodelay(new_sock->sk);
	tcp_sock_set_user_timeout(new_sock->sk, O2NET_TCP_USER_TIMEOUT);
1814 | |
1815 | ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, 1); |
1816 | if (ret < 0) |
1817 | goto out; |
1818 | |
	node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
1820 | if (node == NULL) { |
1821 | printk(KERN_NOTICE "o2net: Attempt to connect from unknown " |
1822 | "node at %pI4:%d\n" , &sin.sin_addr.s_addr, |
1823 | ntohs(sin.sin_port)); |
1824 | ret = -EINVAL; |
1825 | goto out; |
1826 | } |
1827 | |
1828 | if (o2nm_this_node() >= node->nd_num) { |
		local_node = o2nm_get_node_by_num(o2nm_this_node());
1830 | if (local_node) |
1831 | printk(KERN_NOTICE "o2net: Unexpected connect attempt " |
1832 | "seen at node '%s' (%u, %pI4:%d) from " |
1833 | "node '%s' (%u, %pI4:%d)\n" , |
1834 | local_node->nd_name, local_node->nd_num, |
1835 | &(local_node->nd_ipv4_address), |
1836 | ntohs(local_node->nd_ipv4_port), |
1837 | node->nd_name, |
1838 | node->nd_num, &sin.sin_addr.s_addr, |
1839 | ntohs(sin.sin_port)); |
1840 | ret = -EINVAL; |
1841 | goto out; |
1842 | } |
1843 | |
1844 | /* this happens all the time when the other node sees our heartbeat |
1845 | * and tries to connect before we see their heartbeat */ |
	if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
		mlog(ML_CONN, "attempt to connect from node '%s' at "
		     "%pI4:%d but it isn't heartbeating\n",
1849 | node->nd_name, &sin.sin_addr.s_addr, |
1850 | ntohs(sin.sin_port)); |
1851 | ret = -EINVAL; |
1852 | goto out; |
1853 | } |
1854 | |
	nn = o2net_nn_from_num(node->nd_num);
1856 | |
	spin_lock(&nn->nn_lock);
	if (nn->nn_sc)
		ret = -EBUSY;
	else
		ret = 0;
	spin_unlock(&nn->nn_lock);
1863 | if (ret) { |
1864 | printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' " |
1865 | "at %pI4:%d but it already has an open connection\n" , |
1866 | node->nd_name, &sin.sin_addr.s_addr, |
1867 | ntohs(sin.sin_port)); |
1868 | goto out; |
1869 | } |
1870 | |
1871 | sc = sc_alloc(node); |
1872 | if (sc == NULL) { |
1873 | ret = -ENOMEM; |
1874 | goto out; |
1875 | } |
1876 | |
1877 | sc->sc_sock = new_sock; |
1878 | new_sock = NULL; |
1879 | |
	spin_lock(&nn->nn_lock);
	atomic_set(&nn->nn_timeout, 0);
	o2net_set_nn_state(nn, sc, 0, 0);
	spin_unlock(&nn->nn_lock);

	o2net_register_callbacks(sc->sc_sock->sk, sc);
	o2net_sc_queue_work(sc, &sc->sc_rx_work);
1887 | |
1888 | o2net_initialize_handshake(); |
	o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1890 | |
1891 | out: |
1892 | if (new_sock) |
		sock_release(new_sock);
1894 | if (node) |
1895 | o2nm_node_put(node); |
1896 | if (local_node) |
		o2nm_node_put(local_node);
1898 | if (sc) |
1899 | sc_put(sc); |
1900 | |
	memalloc_nofs_restore(nofs_flag);
1902 | return ret; |
1903 | } |
1904 | |
1905 | /* |
1906 | * This function is invoked in response to one or more |
1907 | * pending accepts at softIRQ level. We must drain the |
1908 | * entire que before returning. |
1909 | */ |
1910 | |
1911 | static void o2net_accept_many(struct work_struct *work) |
1912 | { |
1913 | struct socket *sock = o2net_listen_sock; |
1914 | int more; |
1915 | |
1916 | /* |
1917 | * It is critical to note that due to interrupt moderation |
1918 | * at the network driver level, we can't assume to get a |
1919 | * softIRQ for every single conn since tcp SYN packets |
1920 | * can arrive back-to-back, and therefore many pending |
1921 | * accepts may result in just 1 softIRQ. If we terminate |
1922 | * the o2net_accept_one() loop upon seeing an err, what happens |
1923 | * to the rest of the conns in the queue? If no new SYN |
1924 | * arrives for hours, no softIRQ will be delivered, |
1925 | * and the connections will just sit in the queue. |
1926 | */ |
1927 | |
1928 | for (;;) { |
		o2net_accept_one(sock, &more);
1930 | if (!more) |
1931 | break; |
1932 | cond_resched(); |
1933 | } |
1934 | } |
1935 | |
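/* data_ready callback installed on the listening socket; kicks the accept
 * work off to the o2net workqueue */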
1936 | static void o2net_listen_data_ready(struct sock *sk) |
1937 | { |
1938 | void (*ready)(struct sock *sk); |
1939 | |
1940 | trace_sk_data_ready(sk); |
1941 | |
1942 | read_lock_bh(&sk->sk_callback_lock); |
1943 | ready = sk->sk_user_data; |
1944 | if (ready == NULL) { /* check for teardown race */ |
1945 | ready = sk->sk_data_ready; |
1946 | goto out; |
1947 | } |
1948 | |
	/* This callback may be called twice when a new connection
	 * is being established as a child socket inherits everything
	 * from a parent LISTEN socket, including the data_ready cb of
	 * the parent. This leads to a hazard. In o2net_accept_one()
	 * we are still initializing the child socket but have not
	 * changed the inherited data_ready callback yet when
	 * data starts arriving.
	 * We avoid this hazard by checking the state.
	 * For the listening socket, the state will be TCP_LISTEN; for the
	 * new socket, it will be TCP_ESTABLISHED. Also, in this case,
	 * sk->sk_user_data is not a valid function pointer.
	 */
1961 | |
1962 | if (sk->sk_state == TCP_LISTEN) { |
		queue_work(o2net_wq, &o2net_listen_work);
1964 | } else { |
1965 | ready = NULL; |
1966 | } |
1967 | |
1968 | out: |
1969 | read_unlock_bh(&sk->sk_callback_lock); |
1970 | if (ready != NULL) |
1971 | ready(sk); |
1972 | } |
1973 | |
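/* create, bind, and listen on the socket that other nodes connect to; the
 * original data_ready callback is stashed in sk_user_data so it can be
 * restored at teardown */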
1974 | static int o2net_open_listening_sock(__be32 addr, __be16 port) |
1975 | { |
1976 | struct socket *sock = NULL; |
1977 | int ret; |
1978 | struct sockaddr_in sin = { |
1979 | .sin_family = PF_INET, |
1980 | .sin_addr = { .s_addr = addr }, |
1981 | .sin_port = port, |
1982 | }; |
1983 | |
	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0) {
		printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
1987 | goto out; |
1988 | } |
1989 | |
1990 | sock->sk->sk_allocation = GFP_ATOMIC; |
1991 | |
1992 | write_lock_bh(&sock->sk->sk_callback_lock); |
1993 | sock->sk->sk_user_data = sock->sk->sk_data_ready; |
1994 | sock->sk->sk_data_ready = o2net_listen_data_ready; |
1995 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1996 | |
1997 | o2net_listen_sock = sock; |
1998 | INIT_WORK(&o2net_listen_work, o2net_accept_many); |
1999 | |
2000 | sock->sk->sk_reuse = SK_CAN_REUSE; |
2001 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
2002 | if (ret < 0) { |
2003 | printk(KERN_ERR "o2net: Error %d while binding socket at " |
2004 | "%pI4:%u\n" , ret, &addr, ntohs(port)); |
2005 | goto out; |
2006 | } |
2007 | |
2008 | ret = sock->ops->listen(sock, 64); |
2009 | if (ret < 0) |
2010 | printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n" , |
2011 | ret, &addr, ntohs(port)); |
2012 | |
2013 | out: |
2014 | if (ret) { |
2015 | o2net_listen_sock = NULL; |
2016 | if (sock) |
2017 | sock_release(sock); |
2018 | } |
2019 | return ret; |
2020 | } |
2021 | |
2022 | /* |
2023 | * called from node manager when we should bring up our network listening |
2024 | * socket. node manager handles all the serialization to only call this |
2025 | * once and to match it with o2net_stop_listening(). note, |
2026 | * o2nm_this_node() doesn't work yet as we're being called while it |
2027 | * is being set up. |
2028 | */ |
2029 | int o2net_start_listening(struct o2nm_node *node) |
2030 | { |
2031 | int ret = 0; |
2032 | |
2033 | BUG_ON(o2net_wq != NULL); |
2034 | BUG_ON(o2net_listen_sock != NULL); |
2035 | |
2036 | mlog(ML_KTHREAD, "starting o2net thread...\n" ); |
2037 | o2net_wq = alloc_ordered_workqueue("o2net" , WQ_MEM_RECLAIM); |
2038 | if (o2net_wq == NULL) { |
2039 | mlog(ML_ERROR, "unable to launch o2net thread\n" ); |
2040 | return -ENOMEM; /* ? */ |
2041 | } |
2042 | |
	ret = o2net_open_listening_sock(node->nd_ipv4_address,
					node->nd_ipv4_port);
	if (ret) {
		destroy_workqueue(o2net_wq);
		o2net_wq = NULL;
	} else
		o2quo_conn_up(node->nd_num);
2050 | |
2051 | return ret; |
2052 | } |
2053 | |
2054 | /* again, o2nm_this_node() doesn't work here as we're involved in |
2055 | * tearing it down */ |
2056 | void o2net_stop_listening(struct o2nm_node *node) |
2057 | { |
2058 | struct socket *sock = o2net_listen_sock; |
2059 | size_t i; |
2060 | |
2061 | BUG_ON(o2net_wq == NULL); |
2062 | BUG_ON(o2net_listen_sock == NULL); |
2063 | |
2064 | /* stop the listening socket from generating work */ |
2065 | write_lock_bh(&sock->sk->sk_callback_lock); |
2066 | sock->sk->sk_data_ready = sock->sk->sk_user_data; |
2067 | sock->sk->sk_user_data = NULL; |
2068 | write_unlock_bh(&sock->sk->sk_callback_lock); |
2069 | |
2070 | for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { |
		struct o2nm_node *node = o2nm_get_node_by_num(i);
2072 | if (node) { |
2073 | o2net_disconnect_node(node); |
2074 | o2nm_node_put(node); |
2075 | } |
2076 | } |
2077 | |
2078 | /* finish all work and tear down the work queue */ |
2079 | mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n" ); |
2080 | destroy_workqueue(wq: o2net_wq); |
2081 | o2net_wq = NULL; |
2082 | |
2083 | sock_release(sock: o2net_listen_sock); |
2084 | o2net_listen_sock = NULL; |
2085 | |
2086 | o2quo_conn_err(node: node->nd_num); |
2087 | } |
2088 | |
2089 | /* ------------------------------------------------------------ */ |
2090 | |
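/* set up quorum and debugfs state, carve the handshake and keepalive
 * messages out of a single zeroed page, and initialize the per-node
 * connection state */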
2091 | int o2net_init(void) |
2092 | { |
2093 | struct folio *folio; |
2094 | void *p; |
2095 | unsigned long i; |
2096 | |
2097 | o2quo_init(); |
2098 | o2net_debugfs_init(); |
2099 | |
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
2101 | if (!folio) |
2102 | goto out; |
2103 | |
2104 | p = folio_address(folio); |
2105 | o2net_hand = p; |
2106 | p += sizeof(struct o2net_handshake); |
2107 | o2net_keep_req = p; |
2108 | p += sizeof(struct o2net_msg); |
2109 | o2net_keep_resp = p; |
2110 | |
2111 | o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION); |
2112 | o2net_hand->connector_id = cpu_to_be64(1); |
2113 | |
2114 | o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC); |
2115 | o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC); |
2116 | |
2117 | for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { |
		struct o2net_node *nn = o2net_nn_from_num(i);

		atomic_set(&nn->nn_timeout, 0);
2121 | spin_lock_init(&nn->nn_lock); |
2122 | INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); |
2123 | INIT_DELAYED_WORK(&nn->nn_connect_expired, |
2124 | o2net_connect_expired); |
2125 | INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); |
		/* until we see hb from a node, transmit attempts fail with
		 * this persistent error */
		nn->nn_persistent_error = -ENOTCONN;
2128 | init_waitqueue_head(&nn->nn_sc_wq); |
		idr_init(&nn->nn_status_idr);
		INIT_LIST_HEAD(&nn->nn_status_list);
2131 | } |
2132 | |
2133 | return 0; |
2134 | |
2135 | out: |
2136 | o2net_debugfs_exit(); |
2137 | o2quo_exit(); |
2138 | return -ENOMEM; |
2139 | } |
2140 | |
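/* undo o2net_init: tear down quorum and debugfs state and free the page
 * holding the handshake and keepalive messages */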
2141 | void o2net_exit(void) |
2142 | { |
2143 | o2quo_exit(); |
2144 | o2net_debugfs_exit(); |
	folio_put(virt_to_folio(o2net_hand));
2146 | } |
2147 | |