// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

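/* Driver overview (informal summary; the functions below are the
 * authoritative behaviour):
 *
 * A gtp netdevice tunnels IP packets between mobile subscribers (MS),
 * identified by PDP contexts, and one UDP encapsulation socket per GTP
 * version (port 3386 for GTPv0-U, port 2152 for GTPv1-U).  The sockets
 * are either passed in from a userspace daemon via IFLA_GTP_FD0 and
 * IFLA_GTP_FD1 or, with IFLA_GTP_CREATE_SOCKETS, created in the kernel,
 * in which case echo requests are also answered in-kernel.  PDP
 * contexts are then managed over the "gtp" generic netlink family,
 * e.g. with libgtpnl, roughly (tool syntax may differ across versions):
 *
 *	gtp-link add gtp1
 *	gtp-tunnel add gtp1 v1 <i_tei> <o_tei> <ms-addr> <sgsn-addr>
 */
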
/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;

	struct sock		*sk;
	struct net_device	*dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};

/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;
	u8			sk_created;

	struct net_device	*dev;
	struct net		*net;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;

	u8			restart_count;
};

struct echo_info {
	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;
	u8			gtp_version;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};

static void pdp_context_delete(struct pdp_ctx *pctx);

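/* Two parallel hash tables index the PDP contexts: tid_hash is keyed by
 * the GTPv0 TID or GTPv1 TEI and used on decapsulation, addr_hash is
 * keyed by the IPv4 address of the MS and used on transmission.  Both
 * are seeded with the same random jhash initval.
 */
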
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;
	else
		return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}

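/* Decapsulate one T-PDU: check that the inner IP address belongs to the
 * MS of this PDP context, strip the outer UDP and GTP headers and feed
 * the inner packet back into the stack via the gtp netdevice.
 */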
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
		pctx->dev->stats.rx_length_errors++;
		goto err;
	}

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	__netif_rx(skb);
	return 0;

err:
	pctx->dev->stats.rx_dropped++;
	return -1;
}

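/* Fill a flowi4 from the encapsulation socket and look up the IPv4
 * route towards the peer in the socket's network namespace.
 */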
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr, __be32 saddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = sk->sk_bound_dev_if;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = ip_sock_rt_tos(sk);
	fl4->flowi4_scope = ip_sock_rt_scope(sk);
	fl4->flowi4_proto = sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}

/* GSM TS 09.60. 7.3
 * In all Path Management messages:
 * - TID: is not used and shall be set to 0.
 * - Flow Label is not used and shall be set to 0
 * In signalling messages:
 * - number: this field is not yet used in signalling messages.
 *   It shall be set to 255 by the sender and shall be ignored
 *   by the receiver
 * Returns true if the echo req was correct, false otherwise.
 */
static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
{
	return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
		 gtp0->number != 0xff || gtp0->flow);
}

/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	hdr->flags = 0x1e; /* v0, GTP-non-prime. */
	hdr->type = msg_type;
	/* GSM TS 09.60. 7.3 In all Path Management messages Flow Label
	 * and TID are not used and shall be set to 0.
	 */
	hdr->flow = 0;
	hdr->tid = 0;
	hdr->number = 0xff;
	hdr->spare[0] = 0xff;
	hdr->spare[1] = 0xff;
	hdr->spare[2] = 0xff;

	len_pkt = sizeof(struct gtp0_packet);
	len_hdr = sizeof(struct gtp0_header);

	if (msg_type == GTP_ECHO_RSP)
		hdr->length = htons(len_pkt - len_hdr);
	else
		hdr->length = 0;
}

static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_packet *gtp_pkt;
	struct gtp0_header *gtp0;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 seq;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	seq = gtp0->seq;

	/* pull GTP and UDP headers */
	skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp0_packet));

	gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);

	/* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
	 * message shall be copied from the signalling request message
	 * that the GSN is replying to.
	 */
	gtp_pkt->gtp0_h.seq = seq;

	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = gtp->restart_count;

	iph = ip_hdr(skb);

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP0_PORT), htons(GTP0_PORT),
			    !net_eq(sock_net(gtp->sk0),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}

static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct echo_info echo)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (!genlh)
		goto failure;

	if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
		goto failure;

	genlmsg_end(skb, genlh);
	return 0;

failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_header *gtp0;
	struct echo_info echo;
	struct sk_buff *msg;
	struct iphdr *iph;
	int ret;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	iph = ip_hdr(skb);
	echo.ms_addr_ip4.s_addr = iph->daddr;
	echo.peer_addr_ip4.s_addr = iph->saddr;
	echo.gtp_version = GTP_V0;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}

/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	/* If the sockets were created in the kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo requests.
	 */
	if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp0_send_echo_resp(gtp, skb);

	if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp0_handle_echo_resp(gtp, skb);

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	/* S flag must be set to 1 */
	hdr->flags = 0x32; /* v1, GTP-non-prime. */
	hdr->type = msg_type;
	/* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
	hdr->tid = 0;

	/* seq, npdu and next count towards the length of the GTP packet,
	 * which is why the size of gtp1_header has to be subtracted here,
	 * not the size of gtp1_header_long.
	 */

	len_hdr = sizeof(struct gtp1_header);

	if (msg_type == GTP_ECHO_RSP) {
		len_pkt = sizeof(struct gtp1u_packet);
		hdr->length = htons(len_pkt - len_hdr);
	} else {
		/* GTP_ECHO_REQ does not carry a GTP Information Element,
		 * which is why gtp1_header_long is used here.
		 */
		len_pkt = sizeof(struct gtp1_header_long);
		hdr->length = htons(len_pkt - len_hdr);
	}
}

static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct gtp1u_packet *gtp_pkt;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	/* pull GTP and UDP headers */
	skb_pull_data(skb,
		      sizeof(struct gtp1_header_long) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));

	gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);

	/* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
	 * Recovery information element shall not be used, i.e. it shall
	 * be set to zero by the sender and shall be ignored by the receiver.
	 * The Recovery information element is mandatory due to backwards
	 * compatibility reasons.
	 */
	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = 0;

	iph = ip_hdr(skb);

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP1U_PORT), htons(GTP1U_PORT),
			    !net_eq(sock_net(gtp->sk1u),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}

static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct echo_info echo;
	struct sk_buff *msg;
	struct iphdr *iph;
	int ret;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	iph = ip_hdr(skb);
	echo.ms_addr_ip4.s_addr = iph->daddr;
	echo.peer_addr_ip4.s_addr = iph->saddr;
	echo.gtp_version = GTP_V1;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	/* If the sockets were created in the kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo requests.
	 */
	if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp1u_send_echo_resp(gtp, skb);

	if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp1u_handle_echo_resp(gtp, skb);

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the optional 4-byte field (sequence
	 * number, N-PDU number and next extension header type) is present
	 * as a whole.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
		rcu_assign_sk_user_data(sk, NULL);
		release_sock(sk);
		sock_put(sk);
		return;
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	if (gtp->sk_created) {
		udp_tunnel_sock_release(gtp->sk0->sk_socket);
		udp_tunnel_sock_release(gtp->sk1u->sk_socket);
		gtp->sk_created = false;
		gtp->sk0 = NULL;
		gtp->sk1u = NULL;
	} else {
		gtp_encap_disable_sock(gtp->sk0);
		gtp_encap_disable_sock(gtp->sk1u);
	}
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (READ_ONCE(udp_sk(sk)->encap_type)) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
}

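/* Prepend a GTPv0 header: TID and flow label are taken from the PDP
 * context, the 16-bit sequence number from the per-context tx_seq
 * counter.
 */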
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
	gtp0->type = GTP_TPDU;
	gtp0->length = htons(payload_len);
	gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow = htons(pctx->u.v0.flow);
	gtp0->number = 0xff;
	gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits   8  7  6  5  4  3  2  1
	 *	 +--+--+--+--+--+--+--+--+
	 *	 |version |PT| 0| E| S|PN|
	 *	 +--+--+--+--+--+--+--+--+
	 *	   0  0  1  1  1  0  0  0
	 */
	gtp1->flags = 0x30; /* v1, GTP-non-prime. */
	gtp1->type = GTP_TPDU;
	gtp1->length = htons(payload_len);
	gtp1->tid = htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

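/* Scratch state filled in by gtp_build_skb_ip4() and consumed by the
 * udp_tunnel_xmit_skb() call in gtp_dev_xmit().
 */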
struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk = sk;
	pktinfo->iph = iph;
	pktinfo->pctx = pctx;
	pktinfo->rt = rt;
	pktinfo->fl4 = *fl4;
	pktinfo->dev = dev;
}

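/* Encapsulation path: resolve the PDP context from the inner IPv4
 * header, route towards the peer, enforce the tunnel path MTU and
 * prepend the version-specific GTP header.
 */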
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
				  inet_sk(pctx->sk)->inet_saddr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (iph->frag_off & htons(IP_DF) &&
	    ((!skb_is_gso(skb) && skb->len > mtu) ||
	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    !net_eq(sock_net(pktinfo.pctx->sk),
					    dev_net(dev)),
				    false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
	.ndo_uninit	= gtp_dev_uninit,
	.ndo_start_xmit	= gtp_dev_xmit,
};

static const struct device_type gtp_type = {
	.name = "gtp",
};

static void gtp_link_setup(struct net_device *dev)
{
	unsigned int max_gtp_header_len = sizeof(struct iphdr) +
					  sizeof(struct udphdr) +
					  sizeof(struct gtp0_header);
	struct gtp_dev *gtp = netdev_priv(dev);

	dev->netdev_ops = &gtp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &gtp_type);

	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN - max_gtp_header_len;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
	gtp->dev = dev;
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {};
	struct udp_port_cfg udp_conf = {
		.local_ip.s_addr	= htonl(INADDR_ANY),
		.family			= AF_INET,
	};
	struct net *net = gtp->net;
	struct socket *sock;
	int err;

	if (type == UDP_ENCAP_GTP0)
		udp_conf.local_udp_port = htons(GTP0_PORT);
	else if (type == UDP_ENCAP_GTP1U)
		udp_conf.local_udp_port = htons(GTP1U_PORT);
	else
		return ERR_PTR(-EINVAL);

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err)
		return ERR_PTR(err);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tuncfg);

	return sock->sk;
}

static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;

	sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
	if (IS_ERR(sk0))
		return PTR_ERR(sk0);

	sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
	if (IS_ERR(sk1u)) {
		udp_tunnel_sock_release(sk0->sk_socket);
		return PTR_ERR(sk1u);
	}

	gtp->sk_created = true;
	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;

	return 0;
}

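/* Create a new gtp netdevice.  The UDP sockets are either created by
 * the kernel itself (IFLA_GTP_CREATE_SOCKETS) or handed in from a
 * userspace daemon via IFLA_GTP_FD0/IFLA_GTP_FD1.
 */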
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	unsigned int role = GTP_ROLE_GGSN;
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN)
			return -EINVAL;
	}
	gtp->role = role;

	if (!data[IFLA_GTP_RESTART_COUNT])
		gtp->restart_count = 0;
	else
		gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);

	gtp->net = src_net;

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	if (data[IFLA_GTP_CREATE_SOCKETS])
		err = gtp_create_sockets(gtp, data);
	else
		err = gtp_encap_enable(gtp, data);
	if (err < 0)
		goto out_hashtable;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
	[IFLA_GTP_CREATE_SOCKETS]	= { .type = NLA_U8 },
	[IFLA_GTP_RESTART_COUNT]	= { .type = NLA_U8 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
		nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
		nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}

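/* Turn a userspace UDP socket, given as a file descriptor, into a GTP
 * encapsulation socket: take a reference on it and install
 * gtp_encap_recv() as its encap_rcv handler.
 */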
static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return NULL;
	}

	sk = sock->sk;
	if (sk->sk_protocol != IPPROTO_UDP ||
	    sk->sk_type != SOCK_DGRAM ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	lock_sock(sk);
	if (sk->sk_user_data) {
		sk = ERR_PTR(-EBUSY);
		goto out_rel_sock;
	}

	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
	release_sock(sock->sk);
out_sock:
	sockfd_put(sock);
	return sk;
}

static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;

	return 0;
}

static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}

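/* Add or update a PDP context.  NLM_F_EXCL fails on any existing match,
 * NLM_F_REPLACE is not supported, and an update that matches two
 * different contexts (one by MS address, one by TID/TEI) is rejected
 * with -EEXIST.
 */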
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
				   struct genl_info *info)
{
	struct pdp_ctx *pctx, *pctx_tid = NULL;
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	unsigned int version;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	pctx = ipv4_pdp_find(gtp, ms_addr);
	if (pctx)
		found = true;
	if (version == GTP_V0)
		pctx_tid = gtp0_pdp_find(gtp,
					 nla_get_u64(info->attrs[GTPA_TID]));
	else if (version == GTP_V1)
		pctx_tid = gtp1_pdp_find(gtp,
					 nla_get_u32(info->attrs[GTPA_I_TEI]));
	if (pctx_tid)
		found = true;

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return ERR_PTR(-EEXIST);
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return ERR_PTR(-EOPNOTSUPP);

		if (pctx && pctx_tid)
			return ERR_PTR(-EEXIST);
		if (!pctx)
			pctx = pctx_tid;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return pctx;
	}

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return pctx;
}

static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	rtnl_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	pctx = gtp_pdp_add(gtp, sk, info);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
	} else {
		gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
		err = 0;
	}

out_unlock:
	rtnl_unlock();
	return err;
}

static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}

static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}

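/* Dump all PDP contexts.  Iteration state lives in the netlink callback:
 * args[0] holds the hash bucket, args[1] the entry index within the
 * bucket, args[2] the device at which the previous dump stopped and
 * args[4] the completion flag.
 */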
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}

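/* Userspace-triggered echo request, only supported when the sockets were
 * created by the kernel.  The matching echo response is delivered back
 * to userspace through the GTP_GENL_MCGRP multicast group.
 */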
static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *skb_to_send;
	__be32 src_ip, dst_ip;
	unsigned int version;
	struct gtp_dev *gtp;
	struct flowi4 fl4;
	struct rtable *rt;
	struct sock *sk;
	__be16 port;
	int len;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);
	dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp)
		return -ENODEV;

	if (!gtp->sk_created)
		return -EOPNOTSUPP;
	if (!(gtp->dev->flags & IFF_UP))
		return -ENETDOWN;

	if (version == GTP_V0) {
		struct gtp0_header *gtp0_h;

		len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk0;
		port = htons(GTP0_PORT);

		gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
		memset(gtp0_h, 0, sizeof(struct gtp0_header));
		gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
	} else if (version == GTP_V1) {
		struct gtp1_header_long *gtp1u_h;

		len = LL_RESERVED_SPACE(gtp->dev) +
			sizeof(struct gtp1_header_long) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk1u;
		port = htons(GTP1U_PORT);

		gtp1u_h = skb_push(skb_to_send,
				   sizeof(struct gtp1_header_long));
		memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
		gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
	} else {
		return -ENODEV;
	}

	rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
			   &dst_ip);
		kfree_skb(skb_to_send);
		return -ENODEV;
	}

	udp_tunnel_xmit_skb(rt, sk, skb_to_send,
			    fl4.saddr, fl4.daddr,
			    fl4.flowi4_tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    port, port,
			    !net_eq(sock_net(sk),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}

static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};

static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_ECHOREQ,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_send_echo_req,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy		= gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.resv_start_op	= GTP_CMD_ECHOREQ + 1,
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};

static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct gtp_net *gn = net_generic(net, gtp_net_id);
		struct gtp_dev *gtp;

		list_for_each_entry(gtp, &gn->gtp_dev_list, list)
			gtp_dellink(gtp->dev, dev_to_kill);
	}
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit_batch_rtnl = gtp_net_exit_batch_rtnl,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto error_out;

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto unreg_pernet_subsys;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
unreg_pernet_subsys:
	unregister_pernet_subsys(&gtp_net_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");
