// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */

#include <linux/soc/marvell/octeontx2/asm.h>
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"

static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf);

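/* Hardware ops used on OcteonTX2 (CN9xxx) silicon */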
static struct cpt_hw_ops otx2_hw_ops = {
	.send_cmd = otx2_cpt_send_cmd,
	.cpt_get_compcode = otx2_cpt_get_compcode,
	.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
};

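/* Hardware ops used on CN10K silicon, where commands are issued via LMTST */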
static struct cpt_hw_ops cn10k_hw_ops = {
	.send_cmd = cn10k_cpt_send_cmd,
	.cpt_get_compcode = cn10k_cpt_get_compcode,
	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
};

static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	u64 val = (lf->slot & 0x7FF);
	u64 tar_addr = 0;

	/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
	tar_addr |= (__force u64)lf->ioreg |
		    (((OTX2_CPT_INST_SIZE / 16) - 1) & 0x7) << 4;
	/*
	 * Make sure memory areas pointed in CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	/* Copy CPT command to LMTLINE */
	memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
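	/* Issue LMTST to the target address to submit the LMTLINE contents to CPT */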
	cn10k_lmt_flush(val, tar_addr);
}

int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t size;
	u64 lmt_base;

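	/* Without LMTST support fall back to the OcteonTX2 command ops */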
	if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
		cptpf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptpf->lfs.ops = &cn10k_hw_ops;
	lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
	if (!lmt_base) {
		dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
		return -ENOMEM;
	}
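	/*
	 * Size the LMTLINE mapping from the mailbox BAR length, excluding
	 * the PF and per-VF mailbox regions.
	 */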
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
	cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
	if (!cptpf->lfs.lmt_base) {
		dev_err(&pdev->dev,
			"Mapping of PF LMTLINE address failed\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);

int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;

	if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
		cptvf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptvf->lfs.ops = &cn10k_hw_ops;
	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map VF LMTLINE region */
	cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
	if (!cptvf->lfs.lmt_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);