// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

/* Fastpath ipsec opcode with inplace processing */
#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))

#define cpt_inline_rx_opcode(pdev) \
({ \
	u8 opcode; \
	if (is_dev_otx2(pdev)) \
		opcode = CPT_INLINE_RX_OPCODE; \
	else \
		opcode = CN10K_CPT_INLINE_RX_OPCODE; \
	(opcode); \
})

/*
 * CPT PF driver version. It will be incremented by 1 for every feature
 * addition in CPT mailbox messages.
 */
#define OTX2_CPT_PF_DRV_VERSION 0x1

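/*
 * Forward a mailbox request received from a VF to the AF and wait for
 * the AF to process it. AF responses are relayed back to the VF by the
 * AF-PF mailbox handler.
 */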
static int forward_to_af(struct otx2_cptpf_dev *cptpf,
			 struct otx2_cptvf_info *vf,
			 struct mbox_msghdr *req, int size)
{
	struct mbox_msghdr *msg;
	int ret;

	mutex_lock(&cptpf->lock);
	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
	if (msg == NULL) {
		mutex_unlock(&cptpf->lock);
		return -ENOMEM;
	}

	memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
	       (uint8_t *)req + sizeof(struct mbox_msghdr), size);
	msg->id = req->id;
	msg->pcifunc = req->pcifunc;
	msg->sig = req->sig;
	msg->ver = req->ver;

	ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
	/* Error code -EIO indicates a communication failure with the AF.
	 * All other error codes mean that the AF processed the VF messages
	 * and set the error codes in the response messages (if any), so
	 * simply forward the responses to the VF.
	 */
	if (ret == -EIO) {
		dev_warn(&cptpf->pdev->dev,
			 "AF not responding to VF%d messages\n", vf->vf_id);
		mutex_unlock(&cptpf->lock);
		return ret;
	}
	mutex_unlock(&cptpf->lock);
	return 0;
}

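/*
 * Reply to a VF MBOX_MSG_GET_CAPS request with the PF driver version,
 * CPT hardware revision and engine capabilities.
 */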
static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req)
{
	struct otx2_cpt_caps_rsp *rsp;

	rsp = (struct otx2_cpt_caps_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
				  sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_CAPS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
	rsp->cpt_revision = cptpf->eng_grps.rid;
	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));

	return 0;
}

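/* Reply with the engine group number for the requested engine type. */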
static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
				      struct otx2_cptvf_info *vf,
				      struct mbox_msghdr *req)
{
	struct otx2_cpt_egrp_num_msg *grp_req;
	struct otx2_cpt_egrp_num_rsp *rsp;

	grp_req = (struct otx2_cpt_egrp_num_msg *)req;
	rsp = (struct otx2_cpt_egrp_num_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->eng_type = grp_req->eng_type;
	rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
						grp_req->eng_type);

	return 0;
}

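/* Reply with the kernel VF limits configured for this PF. */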
static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
				 struct otx2_cptvf_info *vf,
				 struct mbox_msghdr *req)
{
	struct otx2_cpt_kvf_limits_rsp *rsp;

	rsp = (struct otx2_cpt_kvf_limits_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->kvf_limits = cptpf->kvf_limits;

	return 0;
}

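/*
 * Request the AF to enable inline inbound IPsec processing on the given
 * CPT LF slot, using the provided SSO PF_FUNC for the inline flow.
 */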
static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
					 int sso_pf_func, u8 slot)
{
	struct cpt_inline_ipsec_cfg_msg *req;
	struct pci_dev *pdev = cptpf->pdev;

	req = (struct cpt_inline_ipsec_cfg_msg *)
	      otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
				      sizeof(*req), sizeof(struct msg_rsp));
	if (req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(req, 0, sizeof(*req));
	req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
	req->dir = CPT_INLINE_INBOUND;
	req->slot = slot;
	req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
	req->sso_pf_func = sso_pf_func;
	req->enable = 1;

	return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
}

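/*
 * Configure NIX for inline inbound IPsec: set CPT credits, backpressure
 * ID, engine group and opcode, then enable inbound processing on CPT0
 * (and on CPT1 when it is present).
 */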
static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
				  struct otx2_cpt_rx_inline_lf_cfg *req)
{
	struct nix_inline_ipsec_cfg *nix_req;
	struct pci_dev *pdev = cptpf->pdev;
	int ret;

	nix_req = (struct nix_inline_ipsec_cfg *)
		  otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
					  sizeof(*nix_req),
					  sizeof(struct msg_rsp));
	if (nix_req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(nix_req, 0, sizeof(*nix_req));
	nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
	nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
	nix_req->enable = 1;
	nix_req->credit_th = req->credit_th;
	nix_req->bpid = req->bpid;
	if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
		nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
	else
		nix_req->cpt_credit = req->credit - 1;
	nix_req->gen_cfg.egrp = egrp;
	if (req->opcode)
		nix_req->gen_cfg.opcode = req->opcode;
	else
		nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
	nix_req->gen_cfg.param1 = req->param1;
	nix_req->gen_cfg.param2 = req->param2;
	nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
	nix_req->inst_qsel.cpt_slot = 0;
	ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
	if (ret)
		return ret;

	if (cptpf->has_cpt1) {
		ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
		if (ret)
			return ret;
	}

	return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}

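/*
 * Initialize CPT LFs for RX inline IPsec: attach and configure the LFs,
 * fetch their MSI-X offsets and register misc interrupts.
 */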
int
otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
			struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs)
{
	int ret;

	ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret) {
		dev_err(&cptpf->pdev->dev,
			"LF configuration failed for RX inline ipsec.\n");
		return ret;
	}

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register for CPT LF Misc interrupts */
	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto free_irq;

	return 0;
free_irq:
	otx2_cptlf_unregister_misc_interrupts(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);
	return ret;
}

void
otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Unregister misc interrupt */
	otx2_cptlf_unregister_misc_interrupts(lfs);

	/* Cleanup LFs */
	otx2_cptlf_shutdown(lfs);
}

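/* Handle a VF request to configure CPT LFs for RX inline IPsec. */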
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
					     struct mbox_msghdr *req)
{
	struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
	int num_lfs = 1, ret;
	u8 egrp;

	cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
	if (cptpf->lfs.lfs_num) {
		dev_err(&cptpf->pdev->dev,
			"LF is already configured for RX inline ipsec.\n");
		return -EEXIST;
	}
	/*
	 * Allow LFs to execute requests destined only to the IE_TYPES group
	 * and set the queue priority of each LF to high.
	 */
	egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
	if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(&cptpf->pdev->dev,
			"Engine group for inline ipsec is not available\n");
		return -ENOENT;
	}

	otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
				&cptpf->afpf_mbox, BLKADDR_CPT0);
	cptpf->lfs.global_slot = 0;
	cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
	cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;

	ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
	if (ret) {
		dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
		return ret;
	}

	if (cptpf->has_cpt1) {
		cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
		otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
					cptpf->reg_base, &cptpf->afpf_mbox,
					BLKADDR_CPT1);
		cptpf->cpt1_lfs.global_slot = num_lfs;
		cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
		cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
		ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
					      num_lfs);
		if (ret) {
			dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
			goto lf_cleanup;
		}
		cptpf->rsrc_req_blkaddr = 0;
	}

	ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
	if (ret)
		goto lf1_cleanup;

	return 0;

lf1_cleanup:
	otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
lf_cleanup:
	otx2_inline_cptlf_cleanup(&cptpf->lfs);
	return ret;
}

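/*
 * Dispatch a mailbox request received from a VF: handle it locally if
 * the PF owns the message, otherwise forward it to the AF.
 */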
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req, int size)
{
	int err = 0;

	/* Check if msg is valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto inval_msg;

	switch (req->id) {
	case MBOX_MSG_GET_ENG_GRP_NUM:
		err = handle_msg_get_eng_grp_num(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_CAPS:
		err = handle_msg_get_caps(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_KVF_LIMITS:
		err = handle_msg_kvf_limits(cptpf, vf, req);
		break;
	case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
		err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
		break;

	default:
		err = forward_to_af(cptpf, vf, req, size);
		break;
	}
	return err;

inval_msg:
	otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
	otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
	return err;
}

irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_cptvf_info *vf;
	int i, vf_idx;
	u64 intr;

	/*
	 * Check which VF has raised an interrupt and schedule
	 * corresponding work queue to process the messages.
	 */
	for (i = 0; i < 2; i++) {
		/* Read the interrupt bits */
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFPF_MBOX_INTX(i));

		for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
			vf = &cptpf->vf[vf_idx];
			if (intr & (1ULL << vf->intr_idx)) {
				queue_work(cptpf->vfpf_mbox_wq,
					   &vf->vfpf_mbox_work);
				/* Clear the interrupt */
				otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
						 0, RVU_PF_VFPF_MBOX_INTX(i),
						 BIT_ULL(vf->intr_idx));
			}
		}
	}
	return IRQ_HANDLED;
}

void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_cptvf_info *vf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i, err;

	vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
	cptpf = vf->cptpf;
	mbox = &cptpf->vfpf_mbox;
	/* sync with mbox memory region */
	smp_rmb();
	mdev = &mbox->dev[vf->vf_id];
	/* Process received mbox messages */
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		/* Set which VF sent this message based on mbox IRQ */
		msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
			       ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);

		err = cptpf_handle_vf_req(cptpf, vf, msg,
					  msg->next_msgoff - offset);
		/*
		 * Behave as the AF, drop the msg if there is
		 * no memory, timeout handling also goes here.
		 */
		if (err == -ENOMEM || err == -EIO)
			break;
		offset = msg->next_msgoff;
		/* Write barrier required for VF responses which are handled by
		 * PF driver and not forwarded to AF.
		 */
		smp_wmb();
	}
	/* Send mbox responses to VF */
	if (mdev->num_msgs)
		otx2_mbox_msg_send(mbox, vf->vf_id);
}

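/*
 * AF-PF mailbox interrupt handler: schedules work to process pending
 * messages on both the regular and the uplink AF-PF mailboxes.
 */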
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 intr;

	/* Read the interrupt bits */
	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);

	if (intr & 0x1ULL) {
		mbox = &cptpf->afpf_mbox;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);

		mbox = &cptpf->afpf_mbox_up;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
		/* Clear and ack the interrupt */
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
				 0x1ULL);
	}
	return IRQ_HANDLED;
}

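/* Process an AF response to a mailbox request which the PF itself sent. */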
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
				  struct mbox_msghdr *msg)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct device *dev = &cptpf->pdev->dev;
	struct cpt_rd_wr_reg_msg *rsp_rd_wr;
	struct msix_offset_rsp *rsp_msix;
	int i;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
		lfs = &cptpf->cpt1_lfs;

	switch (msg->id) {
	case MBOX_MSG_READY:
		cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
				RVU_PFVF_PF_MASK;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		rsp_msix = (struct msix_offset_rsp *) msg;
		for (i = 0; i < rsp_msix->cptlfs; i++)
			lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];

		for (i = 0; i < rsp_msix->cpt1_lfs; i++)
			lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
		break;
	case MBOX_MSG_CPT_RD_WR_REGISTER:
		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
		if (msg->rc) {
			dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
				rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
				msg->rc);
			return;
		}
		if (!rsp_rd_wr->is_write)
			*rsp_rd_wr->ret_val = rsp_rd_wr->val;
		break;
	case MBOX_MSG_ATTACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 1;
		break;
	case MBOX_MSG_DETACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 0;
		break;
	case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
	case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
	case MBOX_MSG_CPT_LF_RESET:
		break;

	default:
		dev_err(dev,
			"Unsupported msg %d received.\n", msg->id);
		break;
	}
}

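/* Relay an AF response to the VF which originated the request. */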
static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
			  int vf_id, int size)
{
	struct otx2_mbox *vfpf_mbox;
	struct mbox_msghdr *fwd;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	vfpf_mbox = &cptpf->vfpf_mbox;
	vf_id--;
	if (vf_id >= cptpf->enabled_vfs) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg to unknown VF: %d >= %d\n",
			vf_id, cptpf->enabled_vfs);
		return;
	}
	if (msg->id == MBOX_MSG_VF_FLR)
		return;

	fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
	if (!fwd) {
		dev_err(&cptpf->pdev->dev,
			"Forwarding to VF%d failed.\n", vf_id);
		return;
	}
	memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
	       (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
	fwd->id = msg->id;
	fwd->pcifunc = msg->pcifunc;
	fwd->sig = msg->sig;
	fwd->ver = msg->ver;
	fwd->rc = msg->rc;
}

/* Handle mailbox messages received from AF */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox *afpf_mbox;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	int offset, vf_id, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
	afpf_mbox = &cptpf->afpf_mbox;
	mdev = &afpf_mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
	offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
					     offset);
		vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
			RVU_PFVF_FUNC_MASK;
		if (vf_id > 0)
			forward_to_vf(cptpf, msg, vf_id,
				      msg->next_msgoff - offset);
		else
			process_afpf_mbox_msg(cptpf, msg);

		offset = msg->next_msgoff;
		/* Sync VF response ready to be sent */
		smp_wmb();
		mdev->msgs_acked++;
	}
	otx2_mbox_reset(afpf_mbox, 0);
}

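/*
 * Execute a CPT instruction received from the AF on LF 0 and queue an
 * acknowledgement on the AF-PF uplink mailbox.
 */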
static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
				      struct mbox_msghdr *msg)
{
	struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct msg_rsp *rsp;

	if (cptpf->lfs.lfs_num)
		lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
				   &lfs->lf[0]);

	rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
						    sizeof(*rsp));
	if (!rsp)
		return;

	rsp->hdr.id = msg->id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = 0;
	rsp->hdr.rc = 0;
}

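/* Dispatch uplink (AF to PF) mailbox requests. */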
static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
				     struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_CPT_INST_LMTST:
		handle_msg_cpt_inst_lmtst(cptpf, msg);
		break;
	default:
		otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
	}
}

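/* Work handler for uplink mailbox messages received from the AF. */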
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
	mbox = &cptpf->afpf_mbox_up;
	mdev = &mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		process_afpf_mbox_up_msg(cptpf, msg);

		offset = mbox->rx_start + msg->next_msgoff;
	}
	otx2_mbox_msg_send(mbox, 0);
}
