// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

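/* Flow steering rule that redirects the socket's traffic to the kTLS TIR.
 * Installed from a workqueue (accel_rule_handle_work) rather than inline,
 * since the install is triggered from a NAPI completion, where the
 * potentially-sleeping steering calls cannot run.
 */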
struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

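/* The device DMAs progress params back into this buffer; pad it to the
 * 64-byte device write unit so that a full write unit never lands outside
 * the buffer.
 */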
#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE	\
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

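/* Per-connection resync state. 'core' plugs into the TLS stack's async
 * resync machinery; 'work' posts the GET_PSV request from work context;
 * 'seq' and 'sw_rcd_sn_be' cache the TCP sequence and record serial number
 * last reported by the SW stack (see mlx5e_ktls_rx_resync()).
 */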
struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

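/* RX offload context, stashed in the TLS context's driver state (see
 * mlx5e_set_ktls_rx_priv_ctx()). Freed via resync.refcnt: one reference
 * is held for the lifetime of the connection, plus one per in-flight
 * GET_PSV request.
 */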
struct mlx5e_ktls_offload_context_rx {
	union mlx5e_crypto_info crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	struct mlx5_crypto_dek *dek;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	spinlock_t lock; /* protects resync fields */
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

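/* Drop a resync reference; frees the context on the last put. */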
static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return false;

	kfree(priv_rx);
	return true;
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
	/* protects list changes */
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

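/* Work handler: install the flow steering rule for the offloaded socket,
 * then signal add_ctx so that tls_dev_del() can proceed if it is waiting.
 */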
static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}

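/* Post the static params WQE (key id, TIR and initial record state).
 * Must be called with the channel's async_icosq_lock held.
 */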
static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       mlx5_crypto_dek_get_id(priv_rx->dek),
				       priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

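/* Post both parameter WQEs and ring the doorbell once. On failure, the
 * add flow is unblocked via add_ctx so that teardown does not get stuck.
 */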
static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

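/* The driver context pointer lives in the TLS context's driver state area;
 * these helpers stash and fetch it.
 */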
static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;
	trigger_poll = false;

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	switch (priv_rx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			&priv_rx->crypto_info.crypto_info_128;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			&priv_rx->crypto_info.crypto_info_256;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_rx->crypto_info.crypto_info.cipher_type);
		spin_unlock_bh(&priv_rx->lock);
		spin_unlock_bh(&ktls_resync->lock);
		return;
	}

	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	}
	spin_unlock_bh(&priv_rx->lock);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	dev = mlx5_core_dma_dev(sq->channel->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct net *net = dev_net(netdev);
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

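/* Called from the TLS stack's resync path once the SW record serial number
 * for TCP sequence 'seq' is known; cache both and queue the context for
 * re-programming from NAPI (see resync_handle_seq_match()).
 */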
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

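/* RX datapath hook: account for HW-decrypted traffic, or kick off resync
 * when the device reports that it lost track of the record stream.
 */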
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}

void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

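/* Map the socket to an RX queue, falling back to queue 0 when the socket
 * has no RX queue recorded yet.
 */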
static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

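/* tls_dev_add() for RX: import the key as a DEK, create a dedicated TIR,
 * register for async resync, and post the params WQEs that start offload
 * at start_offload_tcp_sn.
 */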
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_crypto_dek *dek;
	struct mlx5e_priv *priv;
	int rxq, err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_rx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_rx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		err = -EOPNOTSUPP;
		goto err_cipher_type;
	}

	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
	if (IS_ERR(dek)) {
		err = PTR_ERR(dek);
		goto err_cipher_type;
	}
	priv_rx->dek = dek;

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
err_cipher_type:
	kfree(priv_rx);
	return err;
}

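/* tls_dev_del() for RX: mark the context as deleting, sync with NAPI and
 * pending works, then release the steering rule, TIR, DEK and (unless a
 * GET_PSV is still in flight) the context itself.
 */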
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
	/* priv_rx should normally be freed here, but if there is an outstanding
	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
	 * processed.
	 */
	mlx5e_ktls_priv_rx_put(priv_rx);
}

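/* NAPI: drain up to 'budget' entries from the resync response list by
 * re-posting static params with the updated record number. Returns true
 * if the full budget was consumed.
 */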
bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		spin_lock(&priv_rx->lock);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg)) {
			spin_unlock(&priv_rx->lock);
			break;
		}
		list_del_init(&priv_rx->list);
		spin_unlock(&priv_rx->lock);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
		/* This happens only if ICOSQ is full.
		 * There is no need to mark busy or explicitly ask for a NAPI cycle,
		 * it will be triggered by the outstanding ICOSQ completions.
		 */
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}