// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

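/* Scratch state used for ESN output: the 32 high-order sequence bits and
 * the offset at which the ESP header sits while it is temporarily shifted
 * (see esp_output_set_extra() below).
 */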
struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

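/* The esp_tmp_*() helpers below recover the regions of the buffer
 * returned by esp_alloc_tmp().  Layout, from low to high address:
 * extra/seqhi, IV, aead_request plus request context, then the
 * scatterlist entries.
 */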
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

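/* Find the TCP socket that carries this ESP-in-TCP state.  The cached
 * x->encap_sk is returned while it is still established; otherwise the
 * stale cache entry is released via RCU and the socket is looked up
 * again from the ports stored in the encap template.
 */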
static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				     x->id.daddr.a4, dport,
				     x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

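/* Completion callback for asynchronous encryption: free the temporary
 * buffer and hand the skb back to the xfrm output path, or to the
 * device-offload resume path when XFRM_DEV_RESUME is set.
 */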
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}

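/* Write the NAT-T UDP encapsulation header in front of the ESP header.
 * The legacy UDP_ENCAP_ESPINUDP_NON_IKE format additionally inserts two
 * zero 32-bit words between the UDP header and the ESP header.
 */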
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET_ESPINTCP
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

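/* Dispatch to the UDP or TCP encapsulation writer; on success esp->esph
 * points at the ESP header that now follows the encapsulation header.
 */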
static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			    struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

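/* Grow the skb by esp->tailen bytes of ESP trailer and return the number
 * of scatterlist entries needed to map the packet (or a negative errno).
 */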
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

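	/* Fast paths: write the trailer into existing tailroom, or place
	 * it in a freshly refilled page fragment so the payload need not
	 * be copied.  Cloned skbs and oversized trailers fall back to
	 * skb_cow_data().
	 */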
	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb_len_add(skb, tailen);
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

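/* Map the packet into scatterlists, set up the AEAD request and run the
 * encryption.  esp_output_head() must have run first to append the
 * trailer and fill in esp->nfrags and esp->esph.
 */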
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

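	/* Seed the request IV from the 64-bit sequence number: the last
	 * min(ivlen, 8) bytes of the IV receive the low-order bytes of
	 * the big-endian seqno, and any remaining IV bytes stay zero.
	 */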
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;
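	/* Worked example, assuming a 16-byte cipher block and a 12-byte
	 * ICV with no TFC padding: a 100-byte payload gives
	 * clen = ALIGN(100 + 2, 16) = 112, plen = 12 (ten pad bytes plus
	 * the pad-length and next-header bytes) and tailen = 12 + 12 = 24.
	 */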

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

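/* The decrypted packet ends with the ESP trailer: padding, a one-byte
 * pad length, a one-byte next-header field, then alen bytes of ICV.
 * nexthdr[] below captures the two bytes immediately before the ICV.
 */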
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}

int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

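	/* Fast paths: a private, linear skb maps as a single sg entry; a
	 * private skb without a frag list needs one entry per page
	 * fragment plus one for the head.  Anything else is copied via
	 * skb_cow_data() first.
	 */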
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

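/* ICMP error handler: react only to fragmentation-needed and redirect
 * messages, and only for a known SA, updating the cached route's PMTU
 * or gateway accordingly.
 */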
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}

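/* Build the authenc() key blob: an rtattr carrying the encryption key
 * length, followed by the raw authentication key and then the raw
 * encryption key, which is the format the authenc template expects.
 */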
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree_sensitive(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.flags = XFRM_TYPE_REPLAY_PROT,
	.init_state = esp_init_state,
	.destructor = esp_destroy,
	.input = esp_input,
	.output = esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = esp4_rcv_cb,
	.err_handler = esp4_err,
	.priority = 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);