Lines Matching defs:oct

39 void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
43 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
46 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
48 CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
49 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
51 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
52 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
54 lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
57 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
59 lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
62 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
65 lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
66 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
69 lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
72 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
73 CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
76 pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
77 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
81 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
82 "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
83 CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
84 lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
87 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
88 "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
89 CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
91 oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
93 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
95 (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
99 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
101 CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
103 (oct, CN23XX_SLI_PKT_MAC_RINFO64
104 (i, oct->pf_num))));
109 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
113 (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
117 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
119 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
122 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
126 oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
127 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
131 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
135 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
140 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
147 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
151 oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
152 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
156 (oct, CN23XX_SLI_IQ_SIZE(i))));
157 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
161 oct, CN23XX_SLI_IQ_DOORBELL(i))));
162 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
166 oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
171 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
175 oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
176 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
180 (oct, CN23XX_SLI_OQ_SIZE(i))));
181 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
185 oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
186 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
190 oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
191 dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
195 oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
198 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
201 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
202 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
205 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
208 static int cn23xx_pf_soft_reset(struct octeon_device *oct)
210 octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
212 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
213 oct->octeon_id);
215 octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
218 lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
219 lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
224 if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
225 dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
226 oct->octeon_id);
230 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
231 oct->octeon_id);
234 octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
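
The eight matched lines above are the whole soft-reset handshake: widen the window-write mask (210), park the 0x1234 sentinel in SLI_SCRATCH1 (215), read-then-write RST_SOFT_RST to trigger the reset (218-219), and treat a surviving sentinel as failure (224-226). A minimal user-space model of that sequence, with the CSRs reduced to plain variables and the hardware's clearing of SCRATCH1 emulated by one assignment:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the CSRs touched by cn23xx_pf_soft_reset(); the real
 * accessors are octeon_write_csr64()/octeon_read_csr64() and
 * lio_pci_readq()/lio_pci_writeq().
 */
static uint64_t win_wr_mask, sli_scratch1, rst_soft_rst;

static int soft_reset_model(void)
{
	win_wr_mask = 0xFF;          /* open the window registers */
	sli_scratch1 = 0x1234ULL;    /* sentinel the reset must clear */

	(void)rst_soft_rst;          /* read back to flush posted writes */
	rst_soft_rst = 1;            /* pulse the soft reset */

	sli_scratch1 = 0;            /* emulate: hardware clears SCRATCH1 */

	if (sli_scratch1) {          /* still set => reset never happened */
		fprintf(stderr, "Soft reset failed\n");
		return -1;
	}
	win_wr_mask = 0xFF;          /* restore the mask after reset */
	return 0;
}

int main(void) { return soft_reset_model(); }
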
239 static void cn23xx_enable_error_reporting(struct octeon_device *oct)
244 pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
248 pci_read_config_dword(oct->pci_dev,
251 pci_read_config_dword(oct->pci_dev,
254 dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
264 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
265 oct->octeon_id);
266 pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
269 static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
276 return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
279 u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
282 u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
284 oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
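
Line 276 is the whole clock computation: bits 29:24 of RST_BOOT are a multiplier of 50, giving the SLI/coprocessor clock in MHz, i.e. cycles per microsecond (which is why line 282 can assign it straight into oqticks_per_us). A runnable sketch of the microseconds-to-OQ-timer-ticks conversion, assuming the timer counts in units of 1024 SLI cycles (the body of cn23xx_pf_get_oq_ticks is elided by the matcher):

#include <stdint.h>
#include <stdio.h>

/* Per line 276: ((rst_boot >> 24) & 0x3f) * 50 -> clock in MHz. */
static uint32_t coprocessor_clock_mhz(uint64_t rst_boot)
{
	return ((rst_boot >> 24) & 0x3f) * 50;
}

/* Assumed conversion: cycles/us -> cycles/ms -> 1024-cycle ticks/ms ->
 * ticks for the requested microsecond interval.
 */
static uint32_t oq_ticks(uint32_t clock_mhz, uint32_t time_intr_in_us)
{
	uint32_t t = clock_mhz;   /* SLI clock cycles per microsecond */

	t *= 1000;                /* cycles per millisecond */
	t /= 1024;                /* ticks (1024 cycles) per millisecond */
	t *= time_intr_in_us;
	t /= 1000;                /* ticks for time_intr_in_us */
	return t;
}

int main(void)
{
	/* e.g. a 0x0c multiplier -> 600 MHz; 100 us -> 58 ticks */
	uint32_t mhz = coprocessor_clock_mhz(0x0cULL << 24);

	printf("%u MHz -> %u ticks for 100 us\n", mhz, oq_ticks(mhz, 100));
	return 0;
}
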
301 static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
303 u16 mac_no = oct->pcie_port;
304 u16 pf_num = oct->pf_num;
310 dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
315 octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
317 if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
327 (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
329 temp = oct->sriov_info.rings_per_vf & 0xff;
333 temp = oct->sriov_info.max_vfs & 0xff;
337 octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
340 dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
342 (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
345 static int cn23xx_reset_io_queues(struct octeon_device *oct)
352 srn = oct->sriov_info.pf_srn;
353 ern = srn + oct->sriov_info.num_pf_rings;
361 d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
363 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
368 u64 reg_val = octeon_read_csr64(oct,
374 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
377 dev_err(&oct->pci_dev->dev,
384 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
388 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
390 dev_err(&oct->pci_dev->dev,
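
Lines 361-390 show the per-queue reset: set the RST bit in SLI_IQ_PKT_CONTROL64(q_no) (361-363), spin re-reading the register (368-374), and fail through dev_err() on timeout (377, 390). A simplified model of that bounded-poll pattern; the bit positions and loop bound here are illustrative, not the silicon's:

#include <stdint.h>
#include <stdio.h>

#define RST_BIT   (1ULL << 62)  /* illustrative position */
#define QUIET_BIT (1ULL << 28)  /* illustrative position */
#define LOOP_MAX  100           /* illustrative bound */

/* Set RST, then wait (bounded) for the queue to report quiet; the
 * caller reports a timeout the way the driver does with dev_err().
 */
static int reset_queue(volatile uint64_t *pkt_control)
{
	int loop = LOOP_MAX;

	*pkt_control |= RST_BIT;
	while (!(*pkt_control & QUIET_BIT) && --loop)
		;  /* the driver re-reads the CSR on every pass */
	return loop ? 0 : -1;
}

int main(void)
{
	uint64_t reg = QUIET_BIT;  /* emulate hardware already quiet */

	printf("reset %s\n", reset_queue(&reg) ? "timed out" : "ok");
	return 0;
}
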
400 static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
402 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
409 pf_num = oct->pf_num;
411 srn = oct->sriov_info.pf_srn;
412 ern = srn + oct->sriov_info.num_pf_rings;
414 if (cn23xx_reset_io_queues(oct))
423 reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
426 if (q_no < oct->sriov_info.pf_srn) {
427 vf_num = q_no / oct->sriov_info.rings_per_vf;
436 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
446 iq = oct->instr_queue[q_no];
450 inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
454 octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
458 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
475 static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
481 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
483 srn = oct->sriov_info.pf_srn;
484 ern = srn + oct->sriov_info.num_pf_rings;
487 octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
490 octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
494 reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
527 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
529 /* Enabling these interrupt in oct->fn_list.enable_interrupt()
535 oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
537 octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
543 writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
549 if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
550 (oct->rev_id == OCTEON_CN23XX_REV_1_1))
551 writeq(readq((u8 *)oct->mmio[0].hw_addr +
553 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
556 if (oct->pf_num)
558 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
561 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
564 static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
566 cn23xx_enable_error_reporting(oct);
569 cn23xx_setup_global_mac_regs(oct);
571 if (cn23xx_pf_setup_global_input_regs(oct))
574 cn23xx_pf_setup_global_output_regs(oct);
579 octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
583 octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
587 static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
589 struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
592 iq_no += oct->sriov_info.pf_srn;
595 octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
597 octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
603 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
605 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
606 dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
614 if (oct->msix_on) {
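
Lines 595-605 program the ring's base address and size through the CSR interface, then cache raw MMIO pointers for the two registers the transmit hot path touches: the doorbell and the instruction count. A self-contained sketch of that pointer setup; the offsets and stride are made-up stand-ins for the CN23XX_SLI_IQ_* macros:

#include <stdint.h>
#include <stdio.h>

/* Made-up offsets/stride standing in for the CN23XX_SLI_IQ_* macros. */
#define IQ_DOORBELL(q)    (0x10020u + (q) * 0x800u)
#define IQ_INSTR_CNT64(q) (0x10040u + (q) * 0x800u)

struct iq_fast_path {
	volatile uint32_t *doorbell;
	volatile uint64_t *inst_cnt;
};

/* Cache per-ring register pointers off the mapped BAR0 base so the
 * transmit path can ring the doorbell with a single MMIO store.
 */
static void map_iq(struct iq_fast_path *iq, uint8_t *bar0, uint32_t q)
{
	iq->doorbell = (volatile uint32_t *)(bar0 + IQ_DOORBELL(q));
	iq->inst_cnt = (volatile uint64_t *)(bar0 + IQ_INSTR_CNT64(q));
}

int main(void)
{
	static _Alignas(8) uint8_t fake_bar0[0x20000];  /* fake mapping */
	struct iq_fast_path iq;

	map_iq(&iq, fake_bar0, 0);
	*iq.doorbell = 1;  /* "ring" one queued instruction */
	printf("dbell @ %p instcnt @ %p\n",
	       (void *)iq.doorbell, (void *)iq.inst_cnt);
	return 0;
}
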
628 static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
631 struct octeon_droq *droq = oct->droq[oq_no];
632 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
636 oq_no += oct->sriov_info.pf_srn;
638 octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
640 octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
642 octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
647 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
649 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
651 if (!oct->msix_on) {
655 octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
657 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
663 octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
665 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
669 oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
673 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
682 struct octeon_device *oct = mbox->oct_dev;
686 if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
691 for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
692 q_no = i * oct->sriov_info.rings_per_vf;
694 val64 = readq(oct->mbox[q_no]->mbox_write_reg);
697 if (octeon_mbox_read(oct->mbox[q_no]))
699 oct->mbox[q_no]);
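
These poll-worker lines encode the VF-to-ring mapping used throughout the file (and again in cn23xx_handle_pf_mbox_intr at 990-997): VF i owns rings_per_vf consecutive rings, and its mailbox hangs off the first of them. The mapping itself, as a runnable sketch with assumed sizes:

#include <stdio.h>

#define NUM_VFS_ALLOCED 4  /* assumed example values, not chip limits */
#define RINGS_PER_VF    2

int main(void)
{
	/* q_no = i * rings_per_vf, exactly as in lines 692 and 991 */
	for (int i = 0; i < NUM_VFS_ALLOCED; i++)
		printf("VF%d -> mailbox on ring %d\n", i, i * RINGS_PER_VF);
	return 0;
}
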
709 static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
712 u16 mac_no = oct->pcie_port;
713 u16 pf_num = oct->pf_num;
716 if (!oct->sriov_info.max_vfs)
719 for (i = 0; i < oct->sriov_info.max_vfs; i++) {
720 q_no = i * oct->sriov_info.rings_per_vf;
728 mbox->oct_dev = oct;
735 mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
739 mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
743 mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
751 oct->mbox[q_no] = mbox;
756 if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
757 schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
765 vfree(oct->mbox[i]);
771 static int cn23xx_free_pf_mbox(struct octeon_device *oct)
775 if (!oct->sriov_info.max_vfs)
778 for (i = 0; i < oct->sriov_info.max_vfs; i++) {
779 q_no = i * oct->sriov_info.rings_per_vf;
781 &oct->mbox[q_no]->mbox_poll_wk.work);
782 vfree(oct->mbox[q_no]);
788 static int cn23xx_enable_io_queues(struct octeon_device *oct)
794 srn = oct->sriov_info.pf_srn;
795 ern = srn + oct->num_iqs;
799 if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
801 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
804 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
808 if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
813 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
821 oct,
825 dev_err(&oct->pci_dev->dev,
832 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
836 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
838 dev_err(&oct->pci_dev->dev,
845 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
848 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
854 if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
856 oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
858 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
865 static void cn23xx_disable_io_queues(struct octeon_device *oct)
872 srn = oct->sriov_info.pf_srn;
873 ern = srn + oct->num_iqs;
881 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
885 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
892 oct, CN23XX_SLI_PKT_IOQ_RING_RST));
895 oct, CN23XX_SLI_PKT_IOQ_RING_RST));
900 octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
901 while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
916 oct, CN23XX_SLI_PKT_IOQ_RING_RST));
919 oct, CN23XX_SLI_PKT_IOQ_RING_RST));
924 octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
926 while (octeon_read_csr64(oct,
934 oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
935 octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
943 struct octeon_device *oct = ioq_vector->oct_dev;
946 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
948 dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
951 dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
952 oct->pf_num, ioq_vector->ioq_num);
982 static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
988 mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
990 for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
991 q_no = i * oct->sriov_info.rings_per_vf;
995 oct->mbox[0]->mbox_int_reg);
996 if (octeon_mbox_read(oct->mbox[q_no])) {
997 work = &oct->mbox[q_no]->mbox_poll_wk.work;
1007 struct octeon_device *oct = (struct octeon_device *)dev;
1008 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1011 dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
1014 oct->int_status = 0;
1017 dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
1018 oct->octeon_id, CVM_CAST64(intr64));
1022 cn23xx_handle_pf_mbox_intr(oct);
1024 if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
1026 oct->int_status |= OCT_DEV_INTR_PKT_DATA;
1030 oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
1032 oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
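
Lines 1014-1032 are a classic summary-register fan-out: clear oct->int_status, log error bits, hand mailbox bits to cn23xx_handle_pf_mbox_intr(), and only latch OCT_DEV_INTR_PKT_DATA when the device is not in MSI-X mode (1024). A model of that dispatch; the bit assignments below are illustrative, the real CN23XX_INTR_* masks live in the driver headers:

#include <stdint.h>
#include <stdio.h>

#define INTR_ERR        (1ULL << 0)  /* illustrative bit assignments */
#define INTR_MBOX       (1ULL << 1)
#define INTR_PKT_DATA   (1ULL << 2)
#define INTR_DMA0_FORCE (1ULL << 3)
#define INTR_DMA1_FORCE (1ULL << 4)

/* One read of the summary register, then fan out by bit into the
 * int_status flags the bottom half consumes.
 */
static uint64_t fan_out(uint64_t intr64, int msix_on)
{
	uint64_t int_status = 0;

	if (intr64 & INTR_ERR)
		fprintf(stderr, "Error Intr: 0x%016llx\n",
			(unsigned long long)intr64);
	if (intr64 & INTR_MBOX)
		puts("mbox intr");  /* cn23xx_handle_pf_mbox_intr() */
	if (!msix_on && (intr64 & INTR_PKT_DATA))
		int_status |= INTR_PKT_DATA;
	if (intr64 & INTR_DMA0_FORCE)
		int_status |= INTR_DMA0_FORCE;
	if (intr64 & INTR_DMA1_FORCE)
		int_status |= INTR_DMA1_FORCE;
	return int_status;
}

int main(void)
{
	printf("int_status 0x%llx\n",
	       (unsigned long long)fan_out(INTR_PKT_DATA, 0));
	return 0;
}
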
1040 static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
1048 oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1050 lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
1051 CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1053 oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1061 lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
1062 CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1065 oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
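
Lines 1050-1051 first clear bit 0 (the & 0xFFFFFFFEULL) to disable the window before reprogramming it, and 1061-1062 write the new entry. The entry format is visible in the expression itself: each BAR1 index maps a 4 MB (1 << 22 byte) window of core memory, with the window number shifted above four low control bits. A runnable check of that arithmetic (the value of PCI_BAR1_MASK is assumed here; only the name comes from the listing):

#include <stdint.h>
#include <stdio.h>

#define PCI_BAR1_MASK 0xFULL  /* assumed value */

/* Entry layout per line 1061: ((core_addr >> 22) << 4) | mask. */
static uint64_t bar1_index_entry(uint64_t core_addr)
{
	return ((core_addr >> 22) << 4) | PCI_BAR1_MASK;
}

int main(void)
{
	uint64_t entry = bar1_index_entry(3ULL << 22);  /* window 3 */

	printf("entry 0x%llx, disabled 0x%llx\n",
	       (unsigned long long)entry,                    /* 0x3f */
	       (unsigned long long)(entry & 0xFFFFFFFEULL)); /* 0x3e */
	return 0;
}
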
1068 static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
1070 lio_pci_writeq(oct, mask,
1071 CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1074 static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
1077 oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
1101 static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
1103 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1115 (oct->sriov_info.max_vfs > 0)) {
1116 if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
1124 static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
1126 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1137 (oct->sriov_info.max_vfs > 0)) {
1138 if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
1146 static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
1148 oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
1150 dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
1151 oct->pcie_port);
1154 static int cn23xx_get_pf_num(struct octeon_device *oct)
1163 if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
1165 oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
1175 pkt0_in_ctl = octeon_read_csr64(oct,
1179 mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
1182 d64 = octeon_read_csr64(oct,
1186 dev_err(&oct->pci_dev->dev,
1189 oct->pf_num = pfnum;
1192 dev_err(&oct->pci_dev->dev,
1200 static void cn23xx_setup_reg_address(struct octeon_device *oct)
1202 u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
1203 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1205 oct->reg_list.pci_win_wr_addr_hi =
1207 oct->reg_list.pci_win_wr_addr_lo =
1209 oct->reg_list.pci_win_wr_addr =
1212 oct->reg_list.pci_win_rd_addr_hi =
1214 oct->reg_list.pci_win_rd_addr_lo =
1216 oct->reg_list.pci_win_rd_addr =
1219 oct->reg_list.pci_win_wr_data_hi =
1221 oct->reg_list.pci_win_wr_data_lo =
1223 oct->reg_list.pci_win_wr_data =
1226 oct->reg_list.pci_win_rd_data_hi =
1228 oct->reg_list.pci_win_rd_data_lo =
1230 oct->reg_list.pci_win_rd_data =
1233 cn23xx_get_pcie_qlmport(oct);
1236 if (!oct->msix_on)
1238 if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
1243 CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
1246 CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
1249 int cn23xx_sriov_config(struct octeon_device *oct)
1251 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1257 (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
1258 switch (oct->rev_id) {
1273 if (oct->sriov_info.num_pf_rings)
1274 num_pf_rings = oct->sriov_info.num_pf_rings;
1292 oct->sriov_info.trs = total_rings;
1293 oct->sriov_info.max_vfs = max_vfs;
1294 oct->sriov_info.rings_per_vf = rings_per_vf;
1295 oct->sriov_info.pf_srn = pf_srn;
1296 oct->sriov_info.num_pf_rings = num_pf_rings;
1297 dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
1298 oct->sriov_info.trs, oct->sriov_info.max_vfs,
1299 oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
1300 oct->sriov_info.num_pf_rings);
1302 oct->sriov_info.sriov_enabled = 0;
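
The dev_notice at 1297-1300 prints the whole ring partition in one line. The listing doesn't show how those numbers are derived per revision, but the q_no < pf_srn test at 426 implies the layout modeled below: VF rings fill upward from ring 0 in rings_per_vf slices and the PF keeps the top num_pf_rings. A sketch under that assumption, with example inputs:

#include <stdio.h>

struct sriov_layout {
	int trs, max_vfs, rings_per_vf, pf_srn, num_pf_rings;
};

/* Assumed partitioning: VFs below, PF on top, pf_srn marking the
 * boundary between them.
 */
static struct sriov_layout layout(int total_rings, int num_pf_rings,
				  int rings_per_vf)
{
	struct sriov_layout l = {
		.trs = total_rings,
		.num_pf_rings = num_pf_rings,
		.rings_per_vf = rings_per_vf,
		.max_vfs = (total_rings - num_pf_rings) / rings_per_vf,
		.pf_srn = total_rings - num_pf_rings,
	};

	return l;
}

int main(void)
{
	struct sriov_layout l = layout(64, 8, 2);  /* example inputs */

	printf("trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
	       l.trs, l.max_vfs, l.rings_per_vf, l.pf_srn, l.num_pf_rings);
	return 0;
}
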
1307 int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
1312 pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
1314 pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
1316 pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
1318 pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
1323 dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
1325 dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
1329 if (octeon_map_pci_barx(oct, 0, 0))
1332 if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
1333 dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
1335 octeon_unmap_pci_barx(oct, 0);
1339 if (cn23xx_get_pf_num(oct) != 0)
1342 if (cn23xx_sriov_config(oct)) {
1343 octeon_unmap_pci_barx(oct, 0);
1344 octeon_unmap_pci_barx(oct, 1);
1348 octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
1350 oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
1351 oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
1352 oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
1353 oct->fn_list.free_mbox = cn23xx_free_pf_mbox;
1355 oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
1356 oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
1358 oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
1359 oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
1360 oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
1362 oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
1363 oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
1364 oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
1366 oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
1367 oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
1369 oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
1370 oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
1372 cn23xx_setup_reg_address(oct);
1374 oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
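
Lines 1350-1370 are the payoff of the whole file: every chip-specific entry point is parked in oct->fn_list so common liquidio code can dispatch without knowing which silicon it drives. A pared-down model of that ops-table pattern:

#include <stdio.h>

struct octeon_device;  /* opaque to the common code */

struct octeon_fn_list {
	int  (*soft_reset)(struct octeon_device *oct);
	int  (*setup_device_regs)(struct octeon_device *oct);
	int  (*enable_io_queues)(struct octeon_device *oct);
	void (*disable_io_queues)(struct octeon_device *oct);
};

static int fake_soft_reset(struct octeon_device *oct)
{
	(void)oct;
	puts("cn23xx soft reset");
	return 0;
}

int main(void)
{
	struct octeon_fn_list fn_list = {
		.soft_reset = fake_soft_reset,  /* cn23xx_pf_soft_reset */
	};

	return fn_list.soft_reset(NULL);  /* chip-agnostic dispatch */
}
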
1380 int validate_cn23xx_pf_config_info(struct octeon_device *oct,
1384 dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
1391 dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
1399 dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
1405 dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
1411 dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
1419 int cn23xx_fw_loaded(struct octeon_device *oct)
1431 if (atomic_read(oct->adapter_refcount) > 1)
1434 val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
1439 void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
1442 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
1455 mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
1456 octeon_mbox_write(oct, &mbox_cmd);
1462 cn23xx_get_vf_stats_callback(struct octeon_device *oct,
1471 int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
1479 if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
1490 mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
1498 octeon_mbox_write(oct, &mbox_cmd);
1506 octeon_mbox_cancel(oct, 0);
1507 dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n",
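
The stats request is a complete mailbox round trip: refuse VFs whose driver never loaded (1479), target the VF's first ring (1490), send (1498), and on timeout cancel the pending command (1506) before logging (1507). A user-space model of that control flow, with the driver's completion reduced to a polled flag and RINGS_PER_VF an assumed example value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RINGS_PER_VF 2  /* assumed example value */

static bool response_arrived;  /* set by the mbox callback in the driver */

static int get_vf_stats_model(uint64_t vf_drv_loaded_mask, int vfidx)
{
	if (!(vf_drv_loaded_mask & (1ULL << vfidx)))
		return -1;  /* VF driver not loaded: nothing to ask */

	printf("stats request on ring %d\n", vfidx * RINGS_PER_VF);

	/* the driver blocks on a completion; here we just poll a flag */
	for (int ms = 0; ms < 1000; ms++)
		if (response_arrived)
			return 0;

	/* octeon_mbox_cancel() in the driver, then the error below */
	fprintf(stderr, "Unable to get stats from VF-%d, timedout\n", vfidx);
	return -1;
}

int main(void)
{
	response_arrived = true;  /* emulate a prompt VF reply */
	return get_vf_stats_model(0x1ULL, 0);
}
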