Lines matching references to cptpf (each line is prefixed with its line number in the source file):

28 static int forward_to_af(struct otx2_cptpf_dev *cptpf,
35 mutex_lock(&cptpf->lock);
36 msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
38 mutex_unlock(&cptpf->lock);
49 ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
56 dev_warn(&cptpf->pdev->dev,
58 mutex_unlock(&cptpf->lock);
61 mutex_unlock(&cptpf->lock);
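
forward_to_af() relays a VF mailbox request to the AF: it takes cptpf->lock, allocates a message on the PF-to-AF mailbox (afpf_mbox), sends it synchronously with otx2_cpt_sync_mbox_msg(), and warns when the AF stops responding. A minimal sketch of that pattern, assuming the driver's local headers for the types; the header-field copies and the -ENOMEM/-EIO handling are inferred from the matched lines, not verbatim source:

static int forward_to_af(struct otx2_cptpf_dev *cptpf,
                         struct otx2_cptvf_info *vf,
                         struct mbox_msghdr *req, int size)
{
        struct mbox_msghdr *msg;
        int ret;

        mutex_lock(&cptpf->lock);
        msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
        if (msg == NULL) {
                mutex_unlock(&cptpf->lock);
                return -ENOMEM;
        }

        /* Re-post the VF request body under the PF's own header. */
        memcpy((u8 *)msg + sizeof(struct mbox_msghdr),
               (u8 *)req + sizeof(struct mbox_msghdr), size);
        msg->id = req->id;
        msg->pcifunc = req->pcifunc;
        msg->sig = req->sig;
        msg->ver = req->ver;

        ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
        if (ret == -EIO) {
                /* Communication failure: the AF never answered. */
                dev_warn(&cptpf->pdev->dev,
                         "AF not responding to VF%d messages\n", vf->vf_id);
                mutex_unlock(&cptpf->lock);
                return ret;
        }
        mutex_unlock(&cptpf->lock);
        return 0;
}

Holding cptpf->lock across both the allocation and the synchronous send keeps concurrent VF requests from interleaving their messages in the shared PF-to-AF queue.
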
65 static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
72 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
81 rsp->cpt_revision = cptpf->eng_grps.rid;
82 memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
87 static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
96 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
104 rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
110 static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
117 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
124 rsp->kvf_limits = cptpf->kvf_limits;
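
handle_msg_get_caps(), handle_msg_get_eng_grp_num(), and handle_msg_kvf_limits() are the simple query handlers: each allocates a response on the VF mailbox (vfpf_mbox) addressed to the requesting vf->vf_id and fills it purely from PF-local state (the engine capabilities and hardware revision, the engine-group number looked up in cptpf->eng_grps, and the kernel-VF limit, respectively), so no AF round trip is needed.
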
129 static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
133 struct pci_dev *pdev = cptpf->pdev;
136 otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
145 req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
148 req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
152 return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
155 static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
159 struct pci_dev *pdev = cptpf->pdev;
163 otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
187 nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
189 ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
193 if (cptpf->has_cpt1) {
194 ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
199 return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
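
send_inline_ipsec_inbound_msg() and rx_inline_ipsec_lf_cfg() drive the AF side of inline-IPsec setup. Both allocate request/response pairs on afpf_mbox, stamp the requester as this PF with OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0), and send with otx2_cpt_send_mbox_msg(). rx_inline_ipsec_lf_cfg() first points the NIX instruction-queue selection at this PF (inst_qsel.cpt_pf_func), then issues the inbound-configuration message for CPT0 and, when cptpf->has_cpt1 is set, once more for the second CPT block.
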
203 otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
210 dev_err(&cptpf->pdev->dev,
243 static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
251 if (cptpf->lfs.lfs_num) {
252 dev_err(&cptpf->pdev->dev,
260 egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
262 dev_err(&cptpf->pdev->dev,
267 otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
268 &cptpf->afpf_mbox, BLKADDR_CPT0);
269 cptpf->lfs.global_slot = 0;
270 cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
271 cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
273 ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
275 dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
279 if (cptpf->has_cpt1) {
280 cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
281 otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
282 cptpf->reg_base, &cptpf->afpf_mbox,
284 cptpf->cpt1_lfs.global_slot = num_lfs;
285 cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
286 cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
287 ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
290 dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
293 cptpf->rsrc_req_blkaddr = 0;
296 ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
303 otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
305 otx2_inline_cptlf_cleanup(&cptpf->lfs);
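
handle_msg_rx_inline_ipsec_lf_cfg() ties the above together when a VF asks for inline-IPsec RX LFs: it rejects the request if LFs are already attached (cptpf->lfs.lfs_num), resolves the IE engine group, seeds cptpf->lfs (and cptpf->cpt1_lfs on dual-block parts) with otx2_cptlf_set_dev_info() plus the ctx_ilen override from the request, and calls otx2_inline_cptlf_setup() per block, unwinding with otx2_inline_cptlf_cleanup() on failure. cptpf->rsrc_req_blkaddr is set to BLKADDR_CPT1 around the second setup, apparently so that AF replies arriving meanwhile are applied to cpt1_lfs (see process_afpf_mbox_msg() below), and cleared afterwards.
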
309 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
321 err = handle_msg_get_eng_grp_num(cptpf, vf, req);
324 err = handle_msg_get_caps(cptpf, vf, req);
327 err = handle_msg_kvf_limits(cptpf, vf, req);
330 err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
334 err = forward_to_af(cptpf, vf, req, size);
340 otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
341 otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
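
cptpf_handle_vf_req() is the VF-request dispatcher: the CPT-specific messages are answered locally by the handlers above, everything else is forwarded to the AF, and malformed requests are answered with otx2_reply_invalid_msg(). A sketch of the dispatch under those assumptions; the MBOX_MSG_* case labels and the signature check are inferred from the handler names, not present in the matches:

static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
                               struct otx2_cptvf_info *vf,
                               struct mbox_msghdr *req, int size)
{
        int err = 0;

        /* Reject messages that do not carry the request signature. */
        if (req->sig != OTX2_MBOX_REQ_SIG)
                goto inval_msg;

        switch (req->id) {
        case MBOX_MSG_GET_ENG_GRP_NUM:
                err = handle_msg_get_eng_grp_num(cptpf, vf, req);
                break;
        case MBOX_MSG_GET_CAPS:
                err = handle_msg_get_caps(cptpf, vf, req);
                break;
        case MBOX_MSG_GET_KVF_LIMITS:
                err = handle_msg_kvf_limits(cptpf, vf, req);
                break;
        case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
                err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
                break;
        default:
                /* Anything the PF cannot answer itself goes to the AF. */
                err = forward_to_af(cptpf, vf, req, size);
                break;
        }
        return err;

inval_msg:
        otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
        otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
        return err;
}
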
347 struct otx2_cptpf_dev *cptpf = arg;
358 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
361 for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
362 vf = &cptpf->vf[vf_idx];
364 queue_work(cptpf->vfpf_mbox_wq,
367 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
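
The VF-to-PF mailbox interrupt handler (the function starting near line 347) reads the pending-VF bits for each group of 64 VFs, queues the matching VF's mailbox work on cptpf->vfpf_mbox_wq for every signalled VF below cptpf->enabled_vfs, and acknowledges the interrupt with otx2_cpt_write64(). Deferring to a workqueue is necessary because the handling path sleeps: forward_to_af() above takes a mutex and waits on the AF.
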
378 struct otx2_cptpf_dev *cptpf;
387 cptpf = vf->cptpf;
388 mbox = &cptpf->vfpf_mbox;
400 msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
403 err = cptpf_handle_vf_req(cptpf, vf, msg,
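
The corresponding work handler resolves the PF from vf->cptpf, then rewrites each message's pcifunc so the AF sees the true originator (the PF id shifted by RVU_PFVF_PF_SHIFT, combined with the VF part) before handing the message to cptpf_handle_vf_req().
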
424 struct otx2_cptpf_dev *cptpf = arg;
431 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
434 mbox = &cptpf->afpf_mbox;
439 queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
441 mbox = &cptpf->afpf_mbox_up;
446 queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
448 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
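
The AF-to-PF interrupt handler services two mailboxes from a single RVU_PF_INT interrupt: afpf_mbox carries AF replies (the down direction) and afpf_mbox_up carries AF-initiated notifications, and each gets its own work item before the interrupt is acknowledged. A sketch of that shape; the function name, the 0x1 interrupt bit, and the pending-message check through the mailbox RX header are assumptions layered on the matched lines:

static irqreturn_t cptpf_afpf_mbox_intr(int irq, void *arg)
{
        struct otx2_cptpf_dev *cptpf = arg;
        struct otx2_mbox_dev *mdev;
        struct otx2_mbox *mbox;
        struct mbox_hdr *hdr;
        u64 intr;

        intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
        if (intr & 0x1ULL) {
                /* Down mailbox: responses to requests this PF sent. */
                mbox = &cptpf->afpf_mbox;
                mdev = &mbox->dev[0];
                hdr = mdev->mbase + mbox->rx_start;
                if (hdr->num_msgs)
                        queue_work(cptpf->afpf_mbox_wq,
                                   &cptpf->afpf_mbox_work);
                /* Up mailbox: notifications the AF initiated. */
                mbox = &cptpf->afpf_mbox_up;
                mdev = &mbox->dev[0];
                hdr = mdev->mbase + mbox->rx_start;
                if (hdr->num_msgs)
                        queue_work(cptpf->afpf_mbox_wq,
                                   &cptpf->afpf_mbox_up_work);
        }
        /* Ack so the AF can raise the next interrupt. */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, intr);
        return IRQ_HANDLED;
}
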
454 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
457 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
458 struct device *dev = &cptpf->pdev->dev;
472 if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
473 lfs = &cptpf->cpt1_lfs;
477 cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
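
process_afpf_mbox_msg() folds AF replies into PF state. Two details are visible in the matches: replies belonging to the second CPT block (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1) are applied to cpt1_lfs instead of lfs, and the PF learns its own pf_id from the pcifunc carried in an AF reply (line 477, presumably the ready handshake).
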
519 static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
526 dev_err(&cptpf->pdev->dev,
531 dev_err(&cptpf->pdev->dev,
536 vfpf_mbox = &cptpf->vfpf_mbox;
538 if (vf_id >= cptpf->enabled_vfs) {
539 dev_err(&cptpf->pdev->dev,
541 vf_id, cptpf->enabled_vfs);
549 dev_err(&cptpf->pdev->dev,
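
forward_to_vf() completes the relay begun by forward_to_af(): it pushes an AF reply back onto vfpf_mbox for the originating VF, logging dev_err() and dropping the message when the recovered vf_id is out of range (>= cptpf->enabled_vfs) or the reply cannot be queued.
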
565 struct otx2_cptpf_dev *cptpf;
572 cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
573 afpf_mbox = &cptpf->afpf_mbox;
587 forward_to_vf(cptpf, msg, vf_id,
590 process_afpf_mbox_msg(cptpf, msg);
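
The down-mailbox work handler iterates over the received AF messages and routes each one: replies to requests forwarded on behalf of a VF go to forward_to_vf() with the recovered vf_id, while replies to the PF's own requests go to process_afpf_mbox_msg().
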
600 static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
604 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
607 if (cptpf->lfs.lfs_num)
611 rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
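
handle_msg_cpt_inst_lmtst() services an AF up-call carrying a CPT instruction: when the PF has LFs attached (cptpf->lfs.lfs_num), the instruction is apparently issued through them (the send itself is not among the matches), and an acknowledgement is allocated on afpf_mbox_up either way.
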
622 static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
626 dev_err(&cptpf->pdev->dev,
633 handle_msg_cpt_inst_lmtst(cptpf, msg);
636 otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
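
process_afpf_mbox_up_msg() is the dispatcher for the up direction: the LMTST message goes to handle_msg_cpt_inst_lmtst(), and anything unrecognized is answered with otx2_reply_invalid_msg() on afpf_mbox_up.
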
642 struct otx2_cptpf_dev *cptpf;
649 cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
650 mbox = &cptpf->afpf_mbox_up;
661 process_afpf_mbox_up_msg(cptpf, msg);
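
The final work handler drains afpf_mbox_up and feeds each message to process_afpf_mbox_up_msg(). Together, the handlers listed here form the complete AF <-> PF <-> VF mailbox plumbing that every cptpf reference above participates in.
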