1 | /* Copyright (c) 2018, Mellanox Technologies All rights reserved. |
2 | * |
3 | * This software is available to you under a choice of one of two |
4 | * licenses. You may choose to be licensed under the terms of the GNU |
5 | * General Public License (GPL) Version 2, available from the file |
6 | * COPYING in the main directory of this source tree, or the |
7 | * OpenIB.org BSD license below: |
8 | * |
9 | * Redistribution and use in source and binary forms, with or |
10 | * without modification, are permitted provided that the following |
11 | * conditions are met: |
12 | * |
13 | * - Redistributions of source code must retain the above |
14 | * copyright notice, this list of conditions and the following |
15 | * disclaimer. |
16 | * |
17 | * - Redistributions in binary form must reproduce the above |
18 | * copyright notice, this list of conditions and the following |
19 | * disclaimer in the documentation and/or other materials |
20 | * provided with the distribution. |
21 | * |
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
23 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
24 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
25 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
26 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
27 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
28 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
29 | * SOFTWARE. |
30 | */ |
31 | |
32 | #include <crypto/aead.h> |
33 | #include <linux/highmem.h> |
34 | #include <linux/module.h> |
35 | #include <linux/netdevice.h> |
36 | #include <net/dst.h> |
37 | #include <net/inet_connection_sock.h> |
38 | #include <net/tcp.h> |
39 | #include <net/tls.h> |
40 | |
41 | #include "tls.h" |
42 | #include "trace.h" |
43 | |
44 | /* device_offload_lock is used to synchronize tls_dev_add |
45 | * against NETDEV_DOWN notifications. |
46 | */ |
47 | static DECLARE_RWSEM(device_offload_lock); |
48 | |
49 | static struct workqueue_struct *destruct_wq __read_mostly; |
50 | |
51 | static LIST_HEAD(tls_device_list); |
52 | static LIST_HEAD(tls_device_down_list); |
53 | static DEFINE_SPINLOCK(tls_device_lock); |
54 | |
55 | static struct page *dummy_page; |
56 | |
57 | static void tls_device_free_ctx(struct tls_context *ctx) |
58 | { |
59 | if (ctx->tx_conf == TLS_HW) |
		kfree(tls_offload_ctx_tx(ctx));

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));
64 | |
65 | tls_ctx_free(NULL, ctx); |
66 | } |
67 | |
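/* Deferred TX teardown: ask the driver to drop its TX offload state,
 * release the device reference and free the TLS context.
 */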
68 | static void tls_device_tx_del_task(struct work_struct *work) |
69 | { |
70 | struct tls_offload_context_tx *offload_ctx = |
71 | container_of(work, struct tls_offload_context_tx, destruct_work); |
72 | struct tls_context *ctx = offload_ctx->ctx; |
73 | struct net_device *netdev; |
74 | |
75 | /* Safe, because this is the destroy flow, refcount is 0, so |
76 | * tls_device_down can't store this field in parallel. |
77 | */ |
78 | netdev = rcu_dereference_protected(ctx->netdev, |
79 | !refcount_read(&ctx->refcount)); |
80 | |
81 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); |
	dev_put(netdev);
83 | ctx->netdev = NULL; |
84 | tls_device_free_ctx(ctx); |
85 | } |
86 | |
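/* Drop a reference on the context. On the last put, remove it from the
 * device list and free it; TX contexts with a live netdev are handed to
 * destruct_wq, which calls the driver's tls_dev_del from process context.
 */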
87 | static void tls_device_queue_ctx_destruction(struct tls_context *ctx) |
88 | { |
89 | struct net_device *netdev; |
90 | unsigned long flags; |
91 | bool async_cleanup; |
92 | |
93 | spin_lock_irqsave(&tls_device_lock, flags); |
94 | if (unlikely(!refcount_dec_and_test(&ctx->refcount))) { |
		spin_unlock_irqrestore(&tls_device_lock, flags);
96 | return; |
97 | } |
98 | |
	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
100 | |
101 | /* Safe, because this is the destroy flow, refcount is 0, so |
102 | * tls_device_down can't store this field in parallel. |
103 | */ |
104 | netdev = rcu_dereference_protected(ctx->netdev, |
105 | !refcount_read(&ctx->refcount)); |
106 | |
107 | async_cleanup = netdev && ctx->tx_conf == TLS_HW; |
108 | if (async_cleanup) { |
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);
117 | |
118 | if (!async_cleanup) |
119 | tls_device_free_ctx(ctx); |
120 | } |
121 | |
122 | /* We assume that the socket is already connected */ |
123 | static struct net_device *get_netdev_for_sock(struct sock *sk) |
124 | { |
125 | struct dst_entry *dst = sk_dst_get(sk); |
126 | struct net_device *netdev = NULL; |
127 | |
128 | if (likely(dst)) { |
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
131 | } |
132 | |
133 | dst_release(dst); |
134 | |
135 | return netdev; |
136 | } |
137 | |
138 | static void destroy_record(struct tls_record_info *record) |
139 | { |
140 | int i; |
141 | |
142 | for (i = 0; i < record->num_frags; i++) |
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
145 | } |
146 | |
147 | static void delete_all_records(struct tls_offload_context_tx *offload_ctx) |
148 | { |
149 | struct tls_record_info *info, *temp; |
150 | |
151 | list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) { |
		list_del(&info->list);
		destroy_record(info);
154 | } |
155 | |
156 | offload_ctx->retransmit_hint = NULL; |
157 | } |
158 | |
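/* clean_acked_data callback: as TCP ACKs advance, free closed records that
 * the peer has fully acknowledged and account for them in unacked_record_sn.
 */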
159 | static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) |
160 | { |
161 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
162 | struct tls_record_info *info, *temp; |
163 | struct tls_offload_context_tx *ctx; |
164 | u64 deleted_records = 0; |
165 | unsigned long flags; |
166 | |
167 | if (!tls_ctx) |
168 | return; |
169 | |
170 | ctx = tls_offload_ctx_tx(tls_ctx); |
171 | |
172 | spin_lock_irqsave(&ctx->lock, flags); |
173 | info = ctx->retransmit_hint; |
	if (info && !before(acked_seq, info->end_seq))
175 | ctx->retransmit_hint = NULL; |
176 | |
177 | list_for_each_entry_safe(info, temp, &ctx->records_list, list) { |
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
183 | deleted_records++; |
184 | } |
185 | |
186 | ctx->unacked_record_sn += deleted_records; |
	spin_unlock_irqrestore(&ctx->lock, flags);
188 | } |
189 | |
190 | /* At this point, there should be no references on this |
191 | * socket and no in-flight SKBs associated with this |
192 | * socket, so it is safe to free all the resources. |
193 | */ |
194 | void tls_device_sk_destruct(struct sock *sk) |
195 | { |
196 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
197 | struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); |
198 | |
199 | tls_ctx->sk_destruct(sk); |
200 | |
201 | if (tls_ctx->tx_conf == TLS_HW) { |
202 | if (ctx->open_record) |
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
207 | } |
208 | |
	tls_device_queue_ctx_destruction(tls_ctx);
210 | } |
211 | EXPORT_SYMBOL_GPL(tls_device_sk_destruct); |
212 | |
213 | void tls_device_free_resources_tx(struct sock *sk) |
214 | { |
215 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
216 | |
	tls_free_partial_record(sk, tls_ctx);
218 | } |
219 | |
220 | void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq) |
221 | { |
222 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
223 | |
	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
225 | WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags)); |
226 | } |
227 | EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request); |
228 | |
229 | static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx, |
230 | u32 seq) |
231 | { |
232 | struct net_device *netdev; |
233 | struct sk_buff *skb; |
234 | int err = 0; |
235 | u8 *rcd_sn; |
236 | |
237 | skb = tcp_write_queue_tail(sk); |
238 | if (skb) |
239 | TCP_SKB_CB(skb)->eor = 1; |
240 | |
241 | rcd_sn = tls_ctx->tx.rec_seq; |
242 | |
	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
245 | netdev = rcu_dereference_protected(tls_ctx->netdev, |
246 | lockdep_is_held(&device_offload_lock)); |
247 | if (netdev) |
248 | err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, |
249 | rcd_sn, |
250 | TLS_OFFLOAD_CTX_DIR_TX); |
	up_read(&device_offload_lock);
252 | if (err) |
253 | return; |
254 | |
	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
256 | } |
257 | |
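/* Append @size bytes from @pfrag to the open record: extend the last
 * fragment when the new data is contiguous with it, otherwise start a new
 * fragment and take an extra page reference.
 */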
258 | static void tls_append_frag(struct tls_record_info *record, |
259 | struct page_frag *pfrag, |
260 | int size) |
261 | { |
262 | skb_frag_t *frag; |
263 | |
264 | frag = &record->frags[record->num_frags - 1]; |
265 | if (skb_frag_page(frag) == pfrag->page && |
266 | skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) { |
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
					size);
		++record->num_frags;
		get_page(pfrag->page);
274 | } |
275 | |
276 | pfrag->offset += size; |
277 | record->len += size; |
278 | } |
279 | |
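/* Close the open record: link it into records_list, advance the record
 * sequence number, build a scatterlist over its fragments and hand it to
 * the TCP layer via tls_push_sg().
 */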
280 | static int tls_push_record(struct sock *sk, |
281 | struct tls_context *ctx, |
282 | struct tls_offload_context_tx *offload_ctx, |
283 | struct tls_record_info *record, |
284 | int flags) |
285 | { |
286 | struct tls_prot_info *prot = &ctx->prot_info; |
287 | struct tcp_sock *tp = tcp_sk(sk); |
288 | skb_frag_t *frag; |
289 | int i; |
290 | |
291 | record->end_seq = tp->write_seq + record->len; |
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);
299 | |
300 | for (i = 0; i < record->num_frags; i++) { |
301 | frag = &record->frags[i]; |
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
309 | |
310 | /* all ready, send */ |
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
312 | } |
313 | |
314 | static void tls_device_record_close(struct sock *sk, |
315 | struct tls_context *ctx, |
316 | struct tls_record_info *record, |
317 | struct page_frag *pfrag, |
318 | unsigned char record_type) |
319 | { |
320 | struct tls_prot_info *prot = &ctx->prot_info; |
321 | struct page_frag dummy_tag_frag; |
322 | |
323 | /* append tag |
324 | * device will fill in the tag, we just need to append a placeholder |
325 | * use socket memory to improve coalescing (re-using a single buffer |
326 | * increases frag count) |
327 | * if we can't allocate memory now use the dummy page |
328 | */ |
329 | if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) && |
	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
331 | dummy_tag_frag.page = dummy_page; |
332 | dummy_tag_frag.offset = 0; |
333 | pfrag = &dummy_tag_frag; |
334 | } |
	tls_append_frag(record, pfrag, prot->tag_size);
336 | |
337 | /* fill prepend */ |
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
341 | } |
342 | |
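/* Open a new record, reserving prepend_size bytes of the page frag for the
 * TLS header; the header itself is filled in by tls_device_record_close().
 */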
343 | static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, |
344 | struct page_frag *pfrag, |
345 | size_t prepend_size) |
346 | { |
347 | struct tls_record_info *record; |
348 | skb_frag_t *frag; |
349 | |
	record = kmalloc(sizeof(*record), GFP_KERNEL);
351 | if (!record) |
352 | return -ENOMEM; |
353 | |
354 | frag = &record->frags[0]; |
	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
				prepend_size);

	get_page(pfrag->page);
359 | pfrag->offset += prepend_size; |
360 | |
361 | record->num_frags = 1; |
362 | record->len = prepend_size; |
363 | offload_ctx->open_record = record; |
364 | return 0; |
365 | } |
366 | |
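/* Make sure there is an open record and room in the page frag to copy into;
 * on allocation failure signal memory pressure so the caller can wait.
 */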
367 | static int tls_do_allocation(struct sock *sk, |
368 | struct tls_offload_context_tx *offload_ctx, |
369 | struct page_frag *pfrag, |
370 | size_t prepend_size) |
371 | { |
372 | int ret; |
373 | |
374 | if (!offload_ctx->open_record) { |
375 | if (unlikely(!skb_page_frag_refill(prepend_size, pfrag, |
376 | sk->sk_allocation))) { |
377 | READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk); |
378 | sk_stream_moderate_sndbuf(sk); |
379 | return -ENOMEM; |
380 | } |
381 | |
382 | ret = tls_create_new_record(offload_ctx, pfrag, prepend_size); |
383 | if (ret) |
384 | return ret; |
385 | |
386 | if (pfrag->size > pfrag->offset) |
387 | return 0; |
388 | } |
389 | |
390 | if (!sk_page_frag_refill(sk, pfrag)) |
391 | return -ENOMEM; |
392 | |
393 | return 0; |
394 | } |
395 | |
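/* Copy from the iterator while bypassing the CPU cache for the bulk of the
 * data: a short copy up to cache-line alignment, a non-temporal copy of the
 * aligned middle, then the remaining tail.
 */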
396 | static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i) |
397 | { |
398 | size_t pre_copy, nocache; |
399 | |
400 | pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1); |
401 | if (pre_copy) { |
402 | pre_copy = min(pre_copy, bytes); |
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
404 | return -EFAULT; |
405 | bytes -= pre_copy; |
406 | addr += pre_copy; |
407 | } |
408 | |
409 | nocache = round_down(bytes, SMP_CACHE_BYTES); |
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
411 | return -EFAULT; |
412 | bytes -= nocache; |
413 | addr += nocache; |
414 | |
415 | if (bytes && copy_from_iter(addr, bytes, i) != bytes) |
416 | return -EFAULT; |
417 | |
418 | return 0; |
419 | } |
420 | |
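/* Main device TX path: fill the open record from @iter (zero-copy when
 * MSG_SPLICE_PAGES is set), close and push records as they fill up or when
 * the message ends, and return the number of bytes consumed or an error.
 */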
421 | static int tls_push_data(struct sock *sk, |
422 | struct iov_iter *iter, |
423 | size_t size, int flags, |
424 | unsigned char record_type) |
425 | { |
426 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
427 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
428 | struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); |
429 | struct tls_record_info *record; |
430 | int tls_push_record_flags; |
431 | struct page_frag *pfrag; |
432 | size_t orig_size = size; |
433 | u32 max_open_record_len; |
434 | bool more = false; |
435 | bool done = false; |
436 | int copy, rc = 0; |
437 | long timeo; |
438 | |
439 | if (flags & |
440 | ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | |
441 | MSG_SPLICE_PAGES | MSG_EOR)) |
442 | return -EOPNOTSUPP; |
443 | |
444 | if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR)) |
445 | return -EINVAL; |
446 | |
447 | if (unlikely(sk->sk_err)) |
448 | return -sk->sk_err; |
449 | |
450 | flags |= MSG_SENDPAGE_DECRYPTED; |
451 | tls_push_record_flags = flags | MSG_MORE; |
452 | |
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
456 | if (rc < 0) |
457 | return rc; |
458 | } |
459 | |
460 | pfrag = sk_page_frag(sk); |
461 | |
462 | /* TLS_HEADER_SIZE is not counted as part of the TLS record, and |
463 | * we need to leave room for an authentication tag. |
464 | */ |
465 | max_open_record_len = TLS_MAX_PAYLOAD_SIZE + |
466 | prot->prepend_size; |
467 | do { |
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
471 | if (!rc) |
472 | continue; |
473 | |
474 | record = ctx->open_record; |
475 | if (!record) |
476 | break; |
477 | handle_error: |
478 | if (record_type != TLS_RECORD_TYPE_DATA) { |
479 | /* avoid sending partial |
480 | * record with type != |
481 | * application_data |
482 | */ |
483 | size = orig_size; |
484 | destroy_record(record); |
485 | ctx->open_record = NULL; |
486 | } else if (record->len > prot->prepend_size) { |
487 | goto last_record; |
488 | } |
489 | |
490 | break; |
491 | } |
492 | |
493 | record = ctx->open_record; |
494 | |
495 | copy = min_t(size_t, size, max_open_record_len - record->len); |
496 | if (copy && (flags & MSG_SPLICE_PAGES)) { |
497 | struct page_frag zc_pfrag; |
498 | struct page **pages = &zc_pfrag.page; |
499 | size_t off; |
500 | |
			rc = iov_iter_extract_pages(iter, &pages,
						    copy, 1, 0, &off);
503 | if (rc <= 0) { |
504 | if (rc == 0) |
505 | rc = -EIO; |
506 | goto handle_error; |
507 | } |
508 | copy = rc; |
509 | |
510 | if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) { |
				iov_iter_revert(iter, copy);
512 | rc = -EIO; |
513 | goto handle_error; |
514 | } |
515 | |
516 | zc_pfrag.offset = off; |
517 | zc_pfrag.size = copy; |
			tls_append_frag(record, &zc_pfrag, copy);
519 | } else if (copy) { |
520 | copy = min_t(size_t, copy, pfrag->size - pfrag->offset); |
521 | |
			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
528 | } |
529 | |
530 | size -= copy; |
531 | if (!size) { |
532 | last_record: |
533 | tls_push_record_flags = flags; |
534 | if (flags & MSG_MORE) { |
535 | more = true; |
536 | break; |
537 | } |
538 | |
539 | done = true; |
540 | } |
541 | |
542 | if (done || record->len >= max_open_record_len || |
543 | (record->num_frags >= MAX_SKB_FRAGS - 1)) { |
			tls_device_record_close(sk, tls_ctx, record,
						pfrag, record_type);

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
552 | if (rc < 0) |
553 | break; |
554 | } |
555 | } while (!done); |
556 | |
557 | tls_ctx->pending_open_record_frags = more; |
558 | |
559 | if (orig_size - size > 0) |
560 | rc = orig_size - size; |
561 | |
562 | return rc; |
563 | } |
564 | |
565 | int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
566 | { |
567 | unsigned char record_type = TLS_RECORD_TYPE_DATA; |
568 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
569 | int rc; |
570 | |
571 | if (!tls_ctx->zerocopy_sendfile) |
572 | msg->msg_flags &= ~MSG_SPLICE_PAGES; |
573 | |
574 | mutex_lock(&tls_ctx->tx_lock); |
575 | lock_sock(sk); |
576 | |
577 | if (unlikely(msg->msg_controllen)) { |
		rc = tls_process_cmsg(sk, msg, &record_type);
579 | if (rc) |
580 | goto out; |
581 | } |
582 | |
	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
			   record_type);
585 | |
586 | out: |
587 | release_sock(sk); |
	mutex_unlock(&tls_ctx->tx_lock);
589 | return rc; |
590 | } |
591 | |
592 | void tls_device_splice_eof(struct socket *sock) |
593 | { |
594 | struct sock *sk = sock->sk; |
595 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
596 | struct iov_iter iter = {}; |
597 | |
	if (!tls_is_partially_sent_record(tls_ctx))
599 | return; |
600 | |
601 | mutex_lock(&tls_ctx->tx_lock); |
602 | lock_sock(sk); |
603 | |
	if (tls_is_partially_sent_record(tls_ctx)) {
		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
607 | } |
608 | |
609 | release_sock(sk); |
	mutex_unlock(&tls_ctx->tx_lock);
611 | } |
612 | |
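/* Find the offloaded TX record covering TCP sequence number @seq (used by
 * drivers, e.g. when handling retransmitted segments) and report its record
 * sequence number via @p_record_sn.
 */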
613 | struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, |
614 | u32 seq, u64 *p_record_sn) |
615 | { |
616 | u64 record_sn = context->hint_record_sn; |
617 | struct tls_record_info *info, *last; |
618 | |
619 | info = context->retransmit_hint; |
620 | if (!info || |
	    before(seq, info->end_seq - info->len)) {
622 | /* if retransmit_hint is irrelevant start |
623 | * from the beginning of the list |
624 | */ |
625 | info = list_first_entry_or_null(&context->records_list, |
626 | struct tls_record_info, list); |
627 | if (!info) |
628 | return NULL; |
629 | /* send the start_marker record if seq number is before the |
630 | * tls offload start marker sequence number. This record is |
631 | * required to handle TCP packets which are before TLS offload |
632 | * started. |
633 | * And if it's not start marker, look if this seq number |
634 | * belongs to the list. |
635 | */ |
636 | if (likely(!tls_record_is_start_marker(info))) { |
637 | /* we have the first record, get the last record to see |
638 | * if this seq number belongs to the list. |
639 | */ |
640 | last = list_last_entry(&context->records_list, |
641 | struct tls_record_info, list); |
642 | |
			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
645 | return NULL; |
646 | } |
647 | record_sn = context->unacked_record_sn; |
648 | } |
649 | |
650 | /* We just need the _rcu for the READ_ONCE() */ |
651 | rcu_read_lock(); |
652 | list_for_each_entry_from_rcu(info, &context->records_list, list) { |
		if (before(seq, info->end_seq)) {
654 | if (!context->retransmit_hint || |
655 | after(info->end_seq, |
656 | context->retransmit_hint->end_seq)) { |
657 | context->hint_record_sn = record_sn; |
658 | context->retransmit_hint = info; |
659 | } |
660 | *p_record_sn = record_sn; |
661 | goto exit_rcu_unlock; |
662 | } |
663 | record_sn++; |
664 | } |
665 | info = NULL; |
666 | |
667 | exit_rcu_unlock: |
668 | rcu_read_unlock(); |
669 | return info; |
670 | } |
671 | EXPORT_SYMBOL(tls_get_record); |
672 | |
673 | static int tls_device_push_pending_record(struct sock *sk, int flags) |
674 | { |
675 | struct iov_iter iter; |
676 | |
	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
679 | } |
680 | |
681 | void tls_device_write_space(struct sock *sk, struct tls_context *ctx) |
682 | { |
683 | if (tls_is_partially_sent_record(ctx)) { |
684 | gfp_t sk_allocation = sk->sk_allocation; |
685 | |
686 | WARN_ON_ONCE(sk->sk_write_pending); |
687 | |
688 | sk->sk_allocation = GFP_ATOMIC; |
689 | tls_push_partial_record(sk, ctx, |
690 | MSG_DONTWAIT | MSG_NOSIGNAL | |
691 | MSG_SENDPAGE_DECRYPTED); |
692 | sk->sk_allocation = sk_allocation; |
693 | } |
694 | } |
695 | |
696 | static void tls_device_resync_rx(struct tls_context *tls_ctx, |
697 | struct sock *sk, u32 seq, u8 *rcd_sn) |
698 | { |
699 | struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); |
700 | struct net_device *netdev; |
701 | |
	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
703 | rcu_read_lock(); |
704 | netdev = rcu_dereference(tls_ctx->netdev); |
705 | if (netdev) |
706 | netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn, |
707 | TLS_OFFLOAD_CTX_DIR_RX); |
708 | rcu_read_unlock(); |
709 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); |
710 | } |
711 | |
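/* Async resync: while the driver's request is marked async, log record
 * header sequence numbers and count the records seen; once the real request
 * arrives, match it against the log (or the current seq) and report how many
 * records the device is behind via @rcd_delta.
 */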
712 | static bool |
713 | tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, |
714 | s64 resync_req, u32 *seq, u16 *rcd_delta) |
715 | { |
716 | u32 is_async = resync_req & RESYNC_REQ_ASYNC; |
717 | u32 req_seq = resync_req >> 32; |
718 | u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); |
719 | u16 i; |
720 | |
721 | *rcd_delta = 0; |
722 | |
723 | if (is_async) { |
724 | /* shouldn't get to wraparound: |
725 | * too long in async stage, something bad happened |
726 | */ |
727 | if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) |
728 | return false; |
729 | |
730 | /* asynchronous stage: log all headers seq such that |
731 | * req_seq <= seq <= end_seq, and wait for real resync request |
732 | */ |
		if (before(*seq, req_seq))
734 | return false; |
735 | if (!after(*seq, req_end) && |
736 | resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) |
737 | resync_async->log[resync_async->loglen++] = *seq; |
738 | |
739 | resync_async->rcd_delta++; |
740 | |
741 | return false; |
742 | } |
743 | |
744 | /* synchronous stage: check against the logged entries and |
745 | * proceed to check the next entries if no match was found |
746 | */ |
747 | for (i = 0; i < resync_async->loglen; i++) |
748 | if (req_seq == resync_async->log[i] && |
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
750 | *rcd_delta = resync_async->rcd_delta - i; |
751 | *seq = req_seq; |
752 | resync_async->loglen = 0; |
753 | resync_async->rcd_delta = 0; |
754 | return true; |
755 | } |
756 | |
757 | resync_async->loglen = 0; |
758 | resync_async->rcd_delta = 0; |
759 | |
760 | if (req_seq == *seq && |
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
763 | return true; |
764 | |
765 | return false; |
766 | } |
767 | |
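/* Called for every new record header seen on an offloaded RX socket.
 * Depending on the resync type, answer a pending driver request, act on the
 * core next-record hint, or run the async flow, then send the resync point
 * to the device.
 */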
768 | void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) |
769 | { |
770 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
771 | struct tls_offload_context_rx *rx_ctx; |
772 | u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; |
773 | u32 sock_data, is_req_pending; |
774 | struct tls_prot_info *prot; |
775 | s64 resync_req; |
776 | u16 rcd_delta; |
777 | u32 req_seq; |
778 | |
779 | if (tls_ctx->rx_conf != TLS_HW) |
780 | return; |
781 | if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) |
782 | return; |
783 | |
784 | prot = &tls_ctx->prot_info; |
785 | rx_ctx = tls_offload_ctx_rx(tls_ctx); |
786 | memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); |
787 | |
788 | switch (rx_ctx->resync_type) { |
789 | case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ: |
		resync_req = atomic64_read(&rx_ctx->resync_req);
791 | req_seq = resync_req >> 32; |
792 | seq += TLS_HEADER_SIZE - 1; |
793 | is_req_pending = resync_req; |
794 | |
795 | if (likely(!is_req_pending) || req_seq != seq || |
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
797 | return; |
798 | break; |
799 | case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT: |
800 | if (likely(!rx_ctx->resync_nh_do_now)) |
801 | return; |
802 | |
803 | /* head of next rec is already in, note that the sock_inq will |
804 | * include the currently parsed message when called from parser |
805 | */ |
806 | sock_data = tcp_inq(sk); |
807 | if (sock_data > rcd_len) { |
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
810 | return; |
811 | } |
812 | |
813 | rx_ctx->resync_nh_do_now = 0; |
814 | seq += rcd_len; |
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
816 | break; |
817 | case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC: |
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
819 | is_req_pending = resync_req; |
820 | if (likely(!is_req_pending)) |
821 | return; |
822 | |
		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
827 | break; |
828 | } |
829 | |
830 | tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); |
831 | } |
832 | |
833 | static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx, |
834 | struct tls_offload_context_rx *ctx, |
835 | struct sock *sk, struct sk_buff *skb) |
836 | { |
837 | struct strp_msg *rxm; |
838 | |
839 | /* device will request resyncs by itself based on stream scan */ |
840 | if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT) |
841 | return; |
842 | /* already scheduled */ |
843 | if (ctx->resync_nh_do_now) |
844 | return; |
845 | /* seen decrypted fragments since last fully-failed record */ |
846 | if (ctx->resync_nh_reset) { |
847 | ctx->resync_nh_reset = 0; |
848 | ctx->resync_nh.decrypted_failed = 1; |
849 | ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL; |
850 | return; |
851 | } |
852 | |
853 | if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt) |
854 | return; |
855 | |
856 | /* doing resync, bump the next target in case it fails */ |
857 | if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL) |
858 | ctx->resync_nh.decrypted_tgt *= 2; |
859 | else |
860 | ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL; |
861 | |
862 | rxm = strp_msg(skb); |
863 | |
864 | /* head of next rec is already in, parser will sync for us */ |
865 | if (tcp_inq(sk) > rxm->full_len) { |
866 | trace_tls_device_rx_resync_nh_schedule(sk); |
867 | ctx->resync_nh_do_now = 1; |
868 | } else { |
869 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
870 | u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; |
871 | |
872 | memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); |
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
874 | |
875 | tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq, |
876 | rcd_sn); |
877 | } |
878 | } |
879 | |
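/* Handle a record that the device decrypted only partially: run the software
 * AEAD over it into a bounce buffer and write the data back so the record is
 * left in a state the software RX path can process.
 */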
880 | static int |
881 | tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) |
882 | { |
883 | struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); |
884 | const struct tls_cipher_desc *cipher_desc; |
885 | int err, offset, copy, data_len, pos; |
886 | struct sk_buff *skb, *skb_iter; |
887 | struct scatterlist sg[1]; |
888 | struct strp_msg *rxm; |
889 | char *orig_buf, *buf; |
890 | |
	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
892 | DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable); |
893 | |
	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
			   sk->sk_allocation);
897 | if (!orig_buf) |
898 | return -ENOMEM; |
899 | buf = orig_buf; |
900 | |
	err = tls_strp_msg_cow(sw_ctx);
902 | if (unlikely(err)) |
903 | goto free_buf; |
904 | |
	skb = tls_strp_msg(sw_ctx);
906 | rxm = strp_msg(skb); |
907 | offset = rxm->offset; |
908 | |
909 | sg_init_table(sg, 1); |
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
913 | if (err) |
914 | goto free_buf; |
915 | |
916 | /* We are interested only in the decrypted data not the auth */ |
	err = decrypt_skb(sk, sg);
918 | if (err != -EBADMSG) |
919 | goto free_buf; |
920 | else |
921 | err = 0; |
922 | |
923 | data_len = rxm->full_len - cipher_desc->tag; |
924 | |
925 | if (skb_pagelen(skb) > offset) { |
926 | copy = min_t(int, skb_pagelen(skb) - offset, data_len); |
927 | |
928 | if (skb->decrypted) { |
			err = skb_store_bits(skb, offset, buf, copy);
930 | if (err) |
931 | goto free_buf; |
932 | } |
933 | |
934 | offset += copy; |
935 | buf += copy; |
936 | } |
937 | |
938 | pos = skb_pagelen(skb); |
939 | skb_walk_frags(skb, skb_iter) { |
940 | int frag_pos; |
941 | |
942 | /* Practically all frags must belong to msg if reencrypt |
943 | * is needed with current strparser and coalescing logic, |
944 | * but strparser may "get optimized", so let's be safe. |
945 | */ |
946 | if (pos + skb_iter->len <= offset) |
947 | goto done_with_frag; |
948 | if (pos >= data_len + rxm->offset) |
949 | break; |
950 | |
951 | frag_pos = offset - pos; |
952 | copy = min_t(int, skb_iter->len - frag_pos, |
953 | data_len + rxm->offset - offset); |
954 | |
955 | if (skb_iter->decrypted) { |
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
957 | if (err) |
958 | goto free_buf; |
959 | } |
960 | |
961 | offset += copy; |
962 | buf += copy; |
963 | done_with_frag: |
964 | pos += skb_iter->len; |
965 | } |
966 | |
967 | free_buf: |
	kfree(orig_buf);
969 | return err; |
970 | } |
971 | |
972 | int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx) |
973 | { |
974 | struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx); |
975 | struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); |
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
977 | struct strp_msg *rxm = strp_msg(skb); |
978 | int is_decrypted, is_encrypted; |
979 | |
	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
981 | is_decrypted = skb->decrypted; |
982 | is_encrypted = !is_decrypted; |
983 | } else { |
984 | is_decrypted = 0; |
985 | is_encrypted = 0; |
986 | } |
987 | |
	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);
991 | |
992 | if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { |
993 | if (likely(is_encrypted || is_decrypted)) |
994 | return is_decrypted; |
995 | |
996 | /* After tls_device_down disables the offload, the next SKB will |
997 | * likely have initial fragments decrypted, and final ones not |
998 | * decrypted. We need to reencrypt that single SKB. |
999 | */ |
1000 | return tls_device_reencrypt(sk, tls_ctx); |
1001 | } |
1002 | |
1003 | /* Return immediately if the record is either entirely plaintext or |
1004 | * entirely ciphertext. Otherwise handle reencrypt partially decrypted |
1005 | * record. |
1006 | */ |
1007 | if (is_decrypted) { |
1008 | ctx->resync_nh_reset = 1; |
1009 | return is_decrypted; |
1010 | } |
1011 | if (is_encrypted) { |
1012 | tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb); |
1013 | return 0; |
1014 | } |
1015 | |
1016 | ctx->resync_nh_reset = 1; |
1017 | return tls_device_reencrypt(sk, tls_ctx); |
1018 | } |
1019 | |
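/* First time offload is installed on this socket: take a reference on the
 * context and the device, publish the netdev pointer, add the context to
 * tls_device_list and install tls_device_sk_destruct.
 */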
1020 | static void tls_device_attach(struct tls_context *ctx, struct sock *sk, |
1021 | struct net_device *netdev) |
1022 | { |
1023 | if (sk->sk_destruct != tls_device_sk_destruct) { |
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);
1030 | |
1031 | ctx->sk_destruct = sk->sk_destruct; |
1032 | smp_store_release(&sk->sk_destruct, tls_device_sk_destruct); |
1033 | } |
1034 | } |
1035 | |
1036 | static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx) |
1037 | { |
1038 | struct tls_offload_context_tx *offload_ctx; |
1039 | __be64 rcd_sn; |
1040 | |
	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
1042 | if (!offload_ctx) |
1043 | return NULL; |
1044 | |
1045 | INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task); |
	INIT_LIST_HEAD(&offload_ctx->records_list);
1047 | spin_lock_init(&offload_ctx->lock); |
1048 | sg_init_table(offload_ctx->sg_tx_data, |
1049 | ARRAY_SIZE(offload_ctx->sg_tx_data)); |
1050 | |
1051 | /* start at rec_seq - 1 to account for the start marker record */ |
1052 | memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn)); |
1053 | offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1; |
1054 | |
1055 | offload_ctx->ctx = ctx; |
1056 | |
1057 | return offload_ctx; |
1058 | } |
1059 | |
1060 | int tls_set_device_offload(struct sock *sk) |
1061 | { |
1062 | struct tls_record_info *start_marker_record; |
1063 | struct tls_offload_context_tx *offload_ctx; |
1064 | const struct tls_cipher_desc *cipher_desc; |
1065 | struct tls_crypto_info *crypto_info; |
1066 | struct tls_prot_info *prot; |
1067 | struct net_device *netdev; |
1068 | struct tls_context *ctx; |
1069 | struct sk_buff *skb; |
1070 | char *iv, *rec_seq; |
1071 | int rc; |
1072 | |
1073 | ctx = tls_get_ctx(sk); |
1074 | prot = &ctx->prot_info; |
1075 | |
1076 | if (ctx->priv_ctx_tx) |
1077 | return -EEXIST; |
1078 | |
1079 | netdev = get_netdev_for_sock(sk); |
1080 | if (!netdev) { |
		pr_err_ratelimited("%s: netdev not found\n", __func__);
1082 | return -EINVAL; |
1083 | } |
1084 | |
1085 | if (!(netdev->features & NETIF_F_HW_TLS_TX)) { |
1086 | rc = -EOPNOTSUPP; |
1087 | goto release_netdev; |
1088 | } |
1089 | |
1090 | crypto_info = &ctx->crypto_send.info; |
1091 | if (crypto_info->version != TLS_1_2_VERSION) { |
1092 | rc = -EOPNOTSUPP; |
1093 | goto release_netdev; |
1094 | } |
1095 | |
	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
1097 | if (!cipher_desc || !cipher_desc->offloadable) { |
1098 | rc = -EINVAL; |
1099 | goto release_netdev; |
1100 | } |
1101 | |
1102 | rc = init_prot_info(prot, crypto_info, cipher_desc); |
1103 | if (rc) |
1104 | goto release_netdev; |
1105 | |
1106 | iv = crypto_info_iv(crypto_info, cipher_desc); |
1107 | rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc); |
1108 | |
1109 | memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv); |
1110 | memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq); |
1111 | |
	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
1113 | if (!start_marker_record) { |
1114 | rc = -ENOMEM; |
1115 | goto release_netdev; |
1116 | } |
1117 | |
1118 | offload_ctx = alloc_offload_ctx_tx(ctx); |
1119 | if (!offload_ctx) { |
1120 | rc = -ENOMEM; |
1121 | goto free_marker_record; |
1122 | } |
1123 | |
1124 | rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info); |
1125 | if (rc) |
1126 | goto free_offload_ctx; |
1127 | |
1128 | start_marker_record->end_seq = tcp_sk(sk)->write_seq; |
1129 | start_marker_record->len = 0; |
1130 | start_marker_record->num_frags = 0; |
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
1134 | ctx->push_pending_record = tls_device_push_pending_record; |
1135 | |
1136 | /* TLS offload is greatly simplified if we don't send |
1137 | * SKBs where only part of the payload needs to be encrypted. |
1138 | * So mark the last skb in the write queue as end of record. |
1139 | */ |
1140 | skb = tcp_write_queue_tail(sk); |
1141 | if (skb) |
1142 | TCP_SKB_CB(skb)->eor = 1; |
1143 | |
1144 | /* Avoid offloading if the device is down |
1145 | * We don't want to offload new flows after |
1146 | * the NETDEV_DOWN event |
1147 | * |
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1149 | * handler thus protecting from the device going down before |
1150 | * ctx was added to tls_device_list. |
1151 | */ |
	down_read(&device_offload_lock);
1153 | if (!(netdev->flags & IFF_UP)) { |
1154 | rc = -EINVAL; |
1155 | goto release_lock; |
1156 | } |
1157 | |
1158 | ctx->priv_ctx_tx = offload_ctx; |
1159 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, |
1160 | &ctx->crypto_send.info, |
1161 | tcp_sk(sk)->write_seq); |
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
1164 | if (rc) |
1165 | goto release_lock; |
1166 | |
1167 | tls_device_attach(ctx, sk, netdev); |
	up_read(&device_offload_lock);
1169 | |
1170 | /* following this assignment tls_is_skb_tx_device_offloaded |
1171 | * will return true and the context might be accessed |
1172 | * by the netdev's xmit function. |
1173 | */ |
1174 | smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); |
	dev_put(netdev);
1176 | |
1177 | return 0; |
1178 | |
1179 | release_lock: |
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
release_netdev:
	dev_put(netdev);
1190 | return rc; |
1191 | } |
1192 | |
1193 | int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) |
1194 | { |
1195 | struct tls12_crypto_info_aes_gcm_128 *info; |
1196 | struct tls_offload_context_rx *context; |
1197 | struct net_device *netdev; |
1198 | int rc = 0; |
1199 | |
1200 | if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) |
1201 | return -EOPNOTSUPP; |
1202 | |
1203 | netdev = get_netdev_for_sock(sk); |
1204 | if (!netdev) { |
		pr_err_ratelimited("%s: netdev not found\n", __func__);
1206 | return -EINVAL; |
1207 | } |
1208 | |
1209 | if (!(netdev->features & NETIF_F_HW_TLS_RX)) { |
1210 | rc = -EOPNOTSUPP; |
1211 | goto release_netdev; |
1212 | } |
1213 | |
1214 | /* Avoid offloading if the device is down |
1215 | * We don't want to offload new flows after |
1216 | * the NETDEV_DOWN event |
1217 | * |
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1219 | * handler thus protecting from the device going down before |
1220 | * ctx was added to tls_device_list. |
1221 | */ |
	down_read(&device_offload_lock);
1223 | if (!(netdev->flags & IFF_UP)) { |
1224 | rc = -EINVAL; |
1225 | goto release_lock; |
1226 | } |
1227 | |
	context = kzalloc(sizeof(*context), GFP_KERNEL);
1229 | if (!context) { |
1230 | rc = -ENOMEM; |
1231 | goto release_lock; |
1232 | } |
1233 | context->resync_nh_reset = 1; |
1234 | |
1235 | ctx->priv_ctx_rx = context; |
	rc = tls_set_sw_offload(sk, 0);
1237 | if (rc) |
1238 | goto release_ctx; |
1239 | |
1240 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, |
1241 | &ctx->crypto_recv.info, |
1242 | tcp_sk(sk)->copied_seq); |
1243 | info = (void *)&ctx->crypto_recv.info; |
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
1246 | if (rc) |
1247 | goto free_sw_resources; |
1248 | |
1249 | tls_device_attach(ctx, sk, netdev); |
	up_read(&device_offload_lock);

	dev_put(netdev);
1253 | |
1254 | return 0; |
1255 | |
1256 | free_sw_resources: |
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
1260 | release_ctx: |
1261 | ctx->priv_ctx_rx = NULL; |
1262 | release_lock: |
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
1266 | return rc; |
1267 | } |
1268 | |
1269 | void tls_device_offload_cleanup_rx(struct sock *sk) |
1270 | { |
1271 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1272 | struct net_device *netdev; |
1273 | |
	down_read(&device_offload_lock);
1275 | netdev = rcu_dereference_protected(tls_ctx->netdev, |
1276 | lockdep_is_held(&device_offload_lock)); |
1277 | if (!netdev) |
1278 | goto out; |
1279 | |
1280 | netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, |
1281 | TLS_OFFLOAD_CTX_DIR_RX); |
1282 | |
1283 | if (tls_ctx->tx_conf != TLS_HW) { |
		dev_put(netdev);
		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
1288 | } |
1289 | out: |
	up_read(&device_offload_lock);
1291 | tls_sw_release_resources_rx(sk); |
1292 | } |
1293 | |
1294 | static int tls_device_down(struct net_device *netdev) |
1295 | { |
1296 | struct tls_context *ctx, *tmp; |
1297 | unsigned long flags; |
1298 | LIST_HEAD(list); |
1299 | |
1300 | /* Request a write lock to block new offload attempts */ |
	down_write(&device_offload_lock);
1302 | |
1303 | spin_lock_irqsave(&tls_device_lock, flags); |
1304 | list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) { |
1305 | struct net_device *ctx_netdev = |
1306 | rcu_dereference_protected(ctx->netdev, |
1307 | lockdep_is_held(&device_offload_lock)); |
1308 | |
1309 | if (ctx_netdev != netdev || |
		    !refcount_inc_not_zero(&ctx->refcount))
1311 | continue; |
1312 | |
		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);
1316 | |
1317 | list_for_each_entry_safe(ctx, tmp, &list, list) { |
1318 | /* Stop offloaded TX and switch to the fallback. |
1319 | * tls_is_skb_tx_device_offloaded will return false. |
1320 | */ |
1321 | WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); |
1322 | |
1323 | /* Stop the RX and TX resync. |
1324 | * tls_dev_resync must not be called after tls_dev_del. |
1325 | */ |
1326 | rcu_assign_pointer(ctx->netdev, NULL); |
1327 | |
1328 | /* Start skipping the RX resync logic completely. */ |
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1330 | |
1331 | /* Sync with inflight packets. After this point: |
1332 | * TX: no non-encrypted packets will be passed to the driver. |
1333 | * RX: resync requests from the driver will be ignored. |
1334 | */ |
1335 | synchronize_net(); |
1336 | |
1337 | /* Release the offload context on the driver side. */ |
1338 | if (ctx->tx_conf == TLS_HW) |
1339 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
1340 | TLS_OFFLOAD_CTX_DIR_TX); |
1341 | if (ctx->rx_conf == TLS_HW && |
1342 | !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) |
1343 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
1344 | TLS_OFFLOAD_CTX_DIR_RX); |
1345 | |
		dev_put(netdev);
1347 | |
1348 | /* Move the context to a separate list for two reasons: |
1349 | * 1. When the context is deallocated, list_del is called. |
1350 | * 2. It's no longer an offloaded context, so we don't want to |
1351 | * run offload-specific code on this context. |
1352 | */ |
1353 | spin_lock_irqsave(&tls_device_lock, flags); |
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);
1356 | |
		/* Device contexts for RX and TX will be freed on sk_destruct
1358 | * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW. |
1359 | * Now release the ref taken above. |
1360 | */ |
		if (refcount_dec_and_test(&ctx->refcount)) {
1362 | /* sk_destruct ran after tls_device_down took a ref, and |
1363 | * it returned early. Complete the destruction here. |
1364 | */ |
			list_del(&ctx->list);
1366 | tls_device_free_ctx(ctx); |
1367 | } |
1368 | } |
1369 | |
	up_write(&device_offload_lock);
1371 | |
1372 | flush_workqueue(destruct_wq); |
1373 | |
1374 | return NOTIFY_DONE; |
1375 | } |
1376 | |
1377 | static int tls_dev_event(struct notifier_block *this, unsigned long event, |
1378 | void *ptr) |
1379 | { |
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1381 | |
1382 | if (!dev->tlsdev_ops && |
1383 | !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) |
1384 | return NOTIFY_DONE; |
1385 | |
1386 | switch (event) { |
1387 | case NETDEV_REGISTER: |
1388 | case NETDEV_FEAT_CHANGE: |
1389 | if (netif_is_bond_master(dev)) |
1390 | return NOTIFY_DONE; |
1391 | if ((dev->features & NETIF_F_HW_TLS_RX) && |
1392 | !dev->tlsdev_ops->tls_dev_resync) |
1393 | return NOTIFY_BAD; |
1394 | |
1395 | if (dev->tlsdev_ops && |
1396 | dev->tlsdev_ops->tls_dev_add && |
1397 | dev->tlsdev_ops->tls_dev_del) |
1398 | return NOTIFY_DONE; |
1399 | else |
1400 | return NOTIFY_BAD; |
1401 | case NETDEV_DOWN: |
		return tls_device_down(dev);
1403 | } |
1404 | return NOTIFY_DONE; |
1405 | } |
1406 | |
1407 | static struct notifier_block tls_dev_notifier = { |
1408 | .notifier_call = tls_dev_event, |
1409 | }; |
1410 | |
1411 | int __init tls_device_init(void) |
1412 | { |
1413 | int err; |
1414 | |
1415 | dummy_page = alloc_page(GFP_KERNEL); |
1416 | if (!dummy_page) |
1417 | return -ENOMEM; |
1418 | |
	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
1420 | if (!destruct_wq) { |
1421 | err = -ENOMEM; |
1422 | goto err_free_dummy; |
1423 | } |
1424 | |
	err = register_netdevice_notifier(&tls_dev_notifier);
1426 | if (err) |
1427 | goto err_destroy_wq; |
1428 | |
1429 | return 0; |
1430 | |
1431 | err_destroy_wq: |
	destroy_workqueue(destruct_wq);
err_free_dummy:
	put_page(dummy_page);
1435 | return err; |
1436 | } |
1437 | |
1438 | void __exit tls_device_cleanup(void) |
1439 | { |
	unregister_netdevice_notifier(&tls_dev_notifier);
	destroy_workqueue(destruct_wq);
	clean_acked_data_flush();
	put_page(dummy_page);
1444 | } |
1445 | |