/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"

struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	struct delayed_work work;
	bool resolve_by_gid_attr;	/* Consider gid attr in resolve phase */
	int status;
	u32 seq;
};

static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

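/*
 * All pending address resolution requests live on req_list, protected by
 * the spinlock below. addr_wq is allocated as an ordered workqueue in
 * addr_init(), so queued requests are processed one at a time.
 */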
static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
static struct workqueue_struct *addr_wq;

static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid),
		.validation_type = NLA_VALIDATE_MIN,
		.min = sizeof(struct rdma_nla_ls_gid)},
};

static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return false;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_addr_policy, NULL);
	if (ret)
		return false;

	return true;
}

static void ib_nl_process_good_ip_rsp(const struct nlmsghdr *nlh)
{
	const struct nlattr *head, *curr;
	union ib_gid gid;
	struct addr_req *req;
	int len, rem;
	int found = 0;

	head = (const struct nlattr *)nlmsg_data(nlh);
	len = nlmsg_len(nlh);

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type == LS_NLA_TYPE_DGID)
			memcpy(&gid, nla_data(curr), nla_len(curr));
	}

	spin_lock_bh(&lock);
	list_for_each_entry(req, &req_list, list) {
		if (nlh->nlmsg_seq != req->seq)
			continue;
		/* We set the DGID part, the rest was set earlier */
		rdma_addr_set_dgid(req->addr, &gid);
		req->status = 0;
		found = 1;
		break;
	}
	spin_unlock_bh(&lock);

	if (!found)
		pr_info("Couldn't find request waiting for DGID: %pI6\n",
			&gid);
}
131 | |
132 | int ib_nl_handle_ip_res_resp(struct sk_buff *skb, |
133 | struct nlmsghdr *nlh, |
134 | struct netlink_ext_ack *extack) |
135 | { |
136 | if ((nlh->nlmsg_flags & NLM_F_REQUEST) || |
137 | !(NETLINK_CB(skb).sk)) |
138 | return -EPERM; |
139 | |
140 | if (ib_nl_is_good_ip_resp(nlh)) |
141 | ib_nl_process_good_ip_rsep(nlh); |
142 | |
143 | return 0; |
144 | } |
145 | |
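/*
 * Build an RDMA_NL_LS_OP_IP_RESOLVE request carrying the bound interface
 * index and the destination IP address, and multicast it to userspace
 * listeners of the RDMA_NL_GROUP_LS group. Always returns -ENODATA so the
 * request stays queued and is retried until the netlink response arrives.
 */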
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
			     const void *daddr,
			     u32 seq, u16 family)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct rdma_ls_ip_resolve_header *header;
	void *data;
	size_t size;
	int attrtype;
	int len;

	if (family == AF_INET) {
		size = sizeof(struct in_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
	} else {
		size = sizeof(struct in6_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
	}

	len = nla_total_size(sizeof(size));
	len += NLMSG_ALIGN(sizeof(*header));

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -ENODATA;
	}

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	header->ifindex = dev_addr->bound_dev_if;
	nla_put(skb, attrtype, size, daddr);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

	/* Make the request retry, so when we get the response from userspace
	 * we will have something.
	 */
	return -ENODATA;
}

int rdma_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(rdma_addr_size);

int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_in6);

int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_kss);

/**
 * rdma_copy_src_l2_addr - Copy netdevice source addresses
 * @dev_addr: Destination address pointer where to copy the addresses
 * @dev: Netdevice whose source addresses to copy
 *
 * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
 * This includes unicast address, broadcast address, device type and
 * interface index.
 */
void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			   const struct net_device *dev)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_src_l2_addr);

static struct net_device *
rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
	struct net_device *dev = NULL;
	int ret = -EADDRNOTAVAIL;

	switch (src_in->sa_family) {
	case AF_INET:
		dev = __ip_dev_find(net,
				    ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
				    false);
		if (dev)
			ret = 0;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		for_each_netdev_rcu(net, dev) {
			if (ipv6_chk_addr(net,
					  &((const struct sockaddr_in6 *)src_in)->sin6_addr,
					  dev, 1)) {
				ret = 0;
				break;
			}
		}
		break;
#endif
	}
	return ret ? ERR_PTR(ret) : dev;
}

int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		rdma_copy_src_l2_addr(dev_addr, dev);
		dev_put(dev);
		return 0;
	}

	rcu_read_lock();
	dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
	if (!IS_ERR(dev))
		rdma_copy_src_l2_addr(dev_addr, dev);
	rcu_read_unlock();
	return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);

static void set_timeout(struct addr_req *req, unsigned long time)
{
	unsigned long delay;

	delay = time - jiffies;
	if ((long)delay < 0)
		delay = 0;

	mod_delayed_work(addr_wq, &req->work, delay);
}

static void queue_req(struct addr_req *req)
{
	spin_lock_bh(&lock);
	list_add_tail(&req->list, &req_list);
	set_timeout(req, req->timeout);
	spin_unlock_bh(&lock);
}

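/*
 * Userspace-assisted resolution is only attempted when a netlink listener
 * is registered for the LS group; otherwise fail fast with -EADDRNOTAVAIL.
 */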
static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
			  const void *daddr, u32 seq, u16 family)
{
	if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return -EADDRNOTAVAIL;

	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}

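/*
 * Snapshot the destination hardware address from the neighbour entry. If
 * the entry is not yet valid, kick neighbour discovery and return -ENODATA
 * so the request is retried once a neighbour update event arrives.
 */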
static int dst_fetch_ha(const struct dst_entry *dst,
			struct rdma_dev_addr *dev_addr,
			const void *daddr)
{
	struct neighbour *n;
	int ret = 0;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return -ENODATA;

	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		ret = -ENODATA;
	} else {
		neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
	}

	neigh_release(n);

	return ret;
}

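/* True if the route to the destination goes through a gateway. */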
static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
	struct rtable *rt;
	struct rt6_info *rt6;

	if (family == AF_INET) {
		rt = container_of(dst, struct rtable, dst);
		return rt->rt_uses_gateway;
	}

	rt6 = container_of(dst, struct rt6_info, dst);
	return rt6->rt6i_flags & RTF_GATEWAY;
}

static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
		    const struct sockaddr *dst_in, u32 seq)
{
	const struct sockaddr_in *dst_in4 =
		(const struct sockaddr_in *)dst_in;
	const struct sockaddr_in6 *dst_in6 =
		(const struct sockaddr_in6 *)dst_in;
	const void *daddr = (dst_in->sa_family == AF_INET) ?
		(const void *)&dst_in4->sin_addr.s_addr :
		(const void *)&dst_in6->sin6_addr;
	sa_family_t family = dst_in->sa_family;

	might_sleep();

	/* If we have a gateway in IB mode then it must be an IB network */
	if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
		return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
	else
		return dst_fetch_ha(dst, dev_addr, daddr);
}

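/*
 * Resolve an IPv4 route to the destination. On success the source address
 * chosen by the routing lookup is written back to src_sock, the hop limit
 * is recorded, and *prt returns a routing entry whose reference the caller
 * must drop with ip_rt_put().
 */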
static int addr4_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct rtable **prt)
{
	struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
	const struct sockaddr_in *dst_in =
		(const struct sockaddr_in *)dst_sock;

	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(addr->net, &fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;

	src_in->sin_addr.s_addr = fl4.saddr;

	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

	*prt = rt;
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
	const struct sockaddr_in6 *dst_in =
		(const struct sockaddr_in6 *)dst_sock;
	struct flowi6 fl6;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	if (ipv6_addr_any(&src_in->sin6_addr))
		src_in->sin6_addr = fl6.saddr;

	addr->hoplimit = ip6_dst_hoplimit(dst);

	*pdst = dst;
	return 0;
}
#else
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	return -EADDRNOTAVAIL;
}
#endif

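/*
 * Resolve the destination L2 address. For loopback the source L2 address
 * is reused as the destination; devices that do not need ARP (IFF_NOARP)
 * are left as-is; everything else goes through neighbour (or netlink)
 * resolution via fetch_ha().
 */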
static int addr_resolve_neigh(const struct dst_entry *dst,
			      const struct sockaddr *dst_in,
			      struct rdma_dev_addr *addr,
			      unsigned int ndev_flags,
			      u32 seq)
{
	int ret = 0;

	if (ndev_flags & IFF_LOOPBACK) {
		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
	} else {
		if (!(ndev_flags & IFF_NOARP)) {
			/* If the device doesn't do ARP internally */
			ret = fetch_ha(dst, addr, dst_in, seq);
		}
	}
	return ret;
}

static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			    const struct sockaddr *dst_in,
			    const struct dst_entry *dst,
			    const struct net_device *ndev)
{
	int ret = 0;

	if (dst->dev->flags & IFF_LOOPBACK)
		ret = rdma_translate_ip(dst_in, dev_addr);
	else
		rdma_copy_src_l2_addr(dev_addr, dst->dev);

	/*
	 * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
	 * the network type accordingly.
	 */
	if (has_gateway(dst, dst_in->sa_family) &&
	    ndev->type != ARPHRD_INFINIBAND)
		dev_addr->network = dst_in->sa_family == AF_INET ?
			RDMA_NETWORK_IPV4 :
			RDMA_NETWORK_IPV6;
	else
		dev_addr->network = RDMA_NETWORK_IB;

	return ret;
}

static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
				 unsigned int *ndev_flags,
				 const struct sockaddr *dst_in,
				 const struct dst_entry *dst)
{
	struct net_device *ndev = READ_ONCE(dst->dev);

	*ndev_flags = ndev->flags;
	/* A physical device must be the RDMA device to use */
	if (ndev->flags & IFF_LOOPBACK) {
		/*
		 * RDMA (IB/RoCE, iWarp) doesn't run on the lo interface or
		 * a loopback IP address. So if the route resolved to the
		 * loopback interface, translate that to a real ndev based
		 * on the non-loopback IP address.
		 */
		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
		if (IS_ERR(ndev))
			return -ENODEV;
	}

	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
}

static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
{
	struct net_device *ndev;

	ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	/*
	 * Since we hold the rcu read lock, reading net and ifindex is safe
	 * without any additional reference, because change_net_namespace()
	 * in net/core/dev.c does an rcu sync after it changes the state to
	 * IFF_DOWN and before updating the netdev fields {net, ifindex}.
	 */
	addr->net = dev_net(ndev);
	addr->bound_dev_if = ndev->ifindex;
	return 0;
}

static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
{
	addr->net = &init_net;
	addr->bound_dev_if = 0;
}

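/*
 * Core resolution pipeline: look up the route to the destination, derive
 * the source L2 address from the egress netdevice, and optionally resolve
 * the neighbour (destination L2 address). The caller must set addr->net;
 * when resolve_by_gid_attr is true, the namespace and interface are instead
 * derived from the GID attribute and restored to defaults before returning.
 */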
static int addr_resolve(struct sockaddr *src_in,
			const struct sockaddr *dst_in,
			struct rdma_dev_addr *addr,
			bool resolve_neigh,
			bool resolve_by_gid_attr,
			u32 seq)
{
	struct dst_entry *dst = NULL;
	unsigned int ndev_flags = 0;
	struct rtable *rt = NULL;
	int ret;

	if (!addr->net) {
		pr_warn_ratelimited("%s: missing namespace\n", __func__);
		return -EINVAL;
	}

	rcu_read_lock();
	if (resolve_by_gid_attr) {
		if (!addr->sgid_attr) {
			rcu_read_unlock();
			pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
			return -EINVAL;
		}
		/*
		 * If the request is for a specific gid attribute of the
		 * rdma_dev_addr, derive net from the netdevice of the
		 * GID attribute.
		 */
		ret = set_addr_netns_by_gid_rcu(addr);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}
	if (src_in->sa_family == AF_INET) {
		ret = addr4_resolve(src_in, dst_in, addr, &rt);
		dst = &rt->dst;
	} else {
		ret = addr6_resolve(src_in, dst_in, addr, &dst);
	}
	if (ret) {
		rcu_read_unlock();
		goto done;
	}
	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
	rcu_read_unlock();

	/*
	 * Resolve neighbor destination address if requested and
	 * only if src addr translation didn't fail.
	 */
	if (!ret && resolve_neigh)
		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);

	if (src_in->sa_family == AF_INET)
		ip_rt_put(rt);
	else
		dst_release(dst);
done:
	/*
	 * Clear the addr net to go back to its original state, only if it was
	 * derived from GID attribute in this context.
	 */
	if (resolve_by_gid_attr)
		rdma_addr_set_net_defaults(addr);
	return ret;
}

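/*
 * Work handler for a queued request: retry resolution while it keeps
 * returning -ENODATA, fail with -ETIMEDOUT once the deadline has passed,
 * then invoke the callback exactly once and free the request if it is
 * still on req_list.
 */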
static void process_one_req(struct work_struct *_work)
{
	struct addr_req *req;
	struct sockaddr *src_in, *dst_in;

	req = container_of(_work, struct addr_req, work.work);

	if (req->status == -ENODATA) {
		src_in = (struct sockaddr *)&req->src_addr;
		dst_in = (struct sockaddr *)&req->dst_addr;
		req->status = addr_resolve(src_in, dst_in, req->addr,
					   true, req->resolve_by_gid_attr,
					   req->seq);
		if (req->status && time_after_eq(jiffies, req->timeout)) {
			req->status = -ETIMEDOUT;
		} else if (req->status == -ENODATA) {
			/* requeue the work for retrying again */
			spin_lock_bh(&lock);
			if (!list_empty(&req->list))
				set_timeout(req, req->timeout);
			spin_unlock_bh(&lock);
			return;
		}
	}

	req->callback(req->status, (struct sockaddr *)&req->src_addr,
		      req->addr, req->context);
	req->callback = NULL;

	spin_lock_bh(&lock);
	/*
	 * Although the work will normally have been canceled by the workqueue,
	 * it can still be requeued as long as it is on the req_list.
	 */
	cancel_delayed_work(&req->work);
	if (!list_empty(&req->list)) {
		list_del_init(&req->list);
		kfree(req);
	}
	spin_unlock_bh(&lock);
}

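/*
 * Resolve source and destination IP addresses to a device address,
 * asynchronously. The callback runs from workqueue context once resolution
 * completes, fails, or times out; on an immediate hard failure the callback
 * is not invoked and the error is returned directly.
 */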
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, unsigned long timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    bool resolve_by_gid_attr, void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->resolve_by_gid_attr = resolve_by_gid_attr;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

	req->status = addr_resolve(src_in, dst_in, addr, true,
				   req->resolve_by_gid_attr, req->seq);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
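
/*
 * A minimal, illustrative usage sketch (not part of this file): a caller
 * needing synchronous behavior can wait on a completion signalled from the
 * callback, exactly as rdma_addr_find_l2_eth_by_grh() below does with
 * struct resolve_cb_context. The names my_resolve_cb and ctx are
 * hypothetical.
 *
 *	static void my_resolve_cb(int status, struct sockaddr *src_addr,
 *				  struct rdma_dev_addr *addr, void *context)
 *	{
 *		struct resolve_cb_context *ctx = context;
 *
 *		ctx->status = status;
 *		complete(&ctx->comp);
 *	}
 *
 *	...
 *	init_completion(&ctx.comp);
 *	ret = rdma_resolve_ip(src, dst, &dev_addr, 1000,
 *			      my_resolve_cb, false, &ctx);
 *	if (!ret) {
 *		wait_for_completion(&ctx.comp);
 *		ret = ctx.status;
 *	}
 */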

int roce_resolve_route_from_path(struct sa_path_rec *rec,
				 const struct ib_gid_attr *attr)
{
	union {
		struct sockaddr _sockaddr;
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid, dgid;
	struct rdma_dev_addr dev_addr = {};
	int ret;

	might_sleep();

	if (rec->roce.route_resolved)
		return 0;

	rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
	rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);

	if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
		return -EINVAL;

	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.net = &init_net;
	dev_addr.sgid_attr = attr;

	ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
			   &dev_addr, false, true, 0);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}

/**
 * rdma_addr_cancel - Cancel resolve ip request
 * @addr: Pointer to address structure given previously
 * during rdma_resolve_ip().
 *
 * rdma_addr_cancel() is a synchronous function that cancels the pending
 * request, if there is one.
 */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;
	struct addr_req *found = NULL;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			/*
			 * Removing from the list means we take ownership of
			 * the req
			 */
			list_del_init(&req->list);
			found = req;
			break;
		}
	}
	spin_unlock_bh(&lock);

	if (!found)
		return;

	/*
	 * Sync canceling the work after removing it from the req_list
	 * guarantees no work is running and none will be started.
	 */
	cancel_delayed_work_sync(&found->work);
	kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);

struct resolve_cb_context {
	struct completion comp;
	int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
		       struct rdma_dev_addr *addr, void *context)
{
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}

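/*
 * Synchronous helper built on rdma_resolve_ip(): convert the GIDs to IP
 * addresses, resolve with a 1000 msec timeout, and wait on a completion
 * signalled from resolve_cb() before copying out the DMAC and hop limit.
 */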
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
				 int *hoplimit)
{
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	union {
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	dev_addr.sgid_attr = sgid_attr;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
			      (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
			      resolve_cb, true, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);

	ret = ctx.status;
	if (ret)
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	*hoplimit = dev_addr.hoplimit;
	return 0;
}

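/*
 * When a neighbour entry becomes valid, reschedule every pending request
 * to run immediately so a request blocked on -ENODATA can complete.
 */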
static int netevent_callback(struct notifier_block *self, unsigned long event,
			     void *ctx)
{
	struct addr_req *req;

	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			spin_lock_bh(&lock);
			list_for_each_entry(req, &req_list, list)
				set_timeout(req, jiffies);
			spin_unlock_bh(&lock);
		}
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

int addr_init(void)
{
	addr_wq = alloc_ordered_workqueue("ib_addr", 0);
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);

	return 0;
}

void addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
	WARN_ON(!list_empty(&req_list));
}