Lines matching references to "ts" (struct qtnf_pcie_topaz_state *) in the qtnfmac Topaz PCIe glue code

103 static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
105 void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
113 static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
115 void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
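Both legacy-INTx helpers operate on the same CFG0 word in the sysctl BAR: one clears the assert bit with a non-posted write, the other tests it. A minimal sketch, assuming the bit mask is named TOPAZ_ASSERT_INTX (it does not appear in the matches):

static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg;

	/* clear the INTx assert bit; the non-posted write flushes it out */
	cfg = readl(reg);
	cfg &= ~TOPAZ_ASSERT_INTX;		/* assumed bit name */
	qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg = readl(reg);

	/* non-zero while the endpoint still has legacy INTx raised */
	return !!(cfg & TOPAZ_ASSERT_INTX);
}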
121 static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
124 TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
126 pci_restore_state(ts->base.pdev);
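qtnf_topaz_reset_ep() rings the endpoint over the IPC4 doorbell in the sysctl BAR, gives the EP time to restart, and then restores the saved PCI config space. A sketch; the doorbell value macro and the wait constant are assumptions:

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
	/* request an EP reset via the IPC4 doorbell (IRQ word assumed) */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	msleep(QTN_EP_RESET_WAIT_MS);		/* assumed settle time */

	pci_restore_state(ts->base.pdev);
}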
129 static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
131 void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
133 ts->dma_msi_imwr = readl(reg);
136 static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
138 void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
140 qtnf_non_posted_write(ts->dma_msi_imwr, reg);
143 static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
145 void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
147 qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
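All three RX-IRQ helpers target the DMA write-done IMWR address register in the DMA register BAR: setup_rx_irqs() latches the MSI address the endpoint programmed, enable_rx_irqs() restores it so write-done events interrupt the host, and disable_rx_irqs() points it at a dummy DMA address so completions are silently dropped. A sketch under those assumptions:

static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	/* remember the MSI IMWR address programmed by the endpoint */
	ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	/* restore the real MSI address: write-done now raises an interrupt */
	qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	/* divert write-done messages to a dummy host buffer: no interrupt */
	qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}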
152 struct qtnf_pcie_topaz_state *ts = arg;
155 TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
183 static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
187 struct qtnf_pcie_bus_priv *priv = &ts->base;
205 ts->tx_bd_vbase = vaddr;
209 ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);
221 ts->rx_bd_vbase = vaddr;
233 ts->ep_next_rx_pkt = &extra_params->param1;
236 ts->txqueue_wake = &extra_params->param2;
237 ts->ep_pmstate = &extra_params->param3;
238 ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);
244 topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
246 struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
252 ts->base.rx_skb[index] = NULL;
256 ts->base.rx_skb[index] = skb;
258 paddr = dma_map_single(&ts->base.pdev->dev, skb->data, SKB_BUF_SIZE,
260 if (dma_mapping_error(&ts->base.pdev->dev, paddr)) {
268 ts->base.rx_bd_w_index = index;
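topaz_skb2rbd_attach() refills one RX descriptor: allocate an skb, DMA-map it towards the device, publish the address in the descriptor, and advance the write index. A sketch; the __dev_alloc_skb() call, the error path, and the descriptor field names (addr, info) are assumptions built around the matched lines:

static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
	struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __dev_alloc_skb(SKB_BUF_SIZE, GFP_ATOMIC);
	if (!skb) {
		ts->base.rx_skb[index] = NULL;
		return -ENOMEM;
	}

	ts->base.rx_skb[index] = skb;

	paddr = dma_map_single(&ts->base.pdev->dev, skb->data, SKB_BUF_SIZE,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(&ts->base.pdev->dev, paddr)) {
		ts->base.rx_skb[index] = NULL;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	/* hand the buffer to the EP: address first, then mark it empty */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

	ts->base.rx_bd_w_index = index;

	return 0;
}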
273 static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
278 memset(ts->rx_bd_vbase, 0x0,
279 ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));
281 for (i = 0; i < ts->base.rx_bd_num; i++) {
282 ret = topaz_skb2rbd_attach(ts, i, 0);
287 ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
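The RX ring is then populated in one pass: clear the descriptor array, attach an skb to every slot, and flag the last descriptor so the DMA engine wraps back to index 0. A sketch consistent with the matched lines, with QTN_BD_WRAP as an assumed flag name:

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
	u16 i;
	int ret = 0;

	memset(ts->rx_bd_vbase, 0x0,
	       ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

	for (i = 0; i < ts->base.rx_bd_num; i++) {
		ret = topaz_skb2rbd_attach(ts, i, 0);
		if (ret)
			break;
	}

	/* last descriptor wraps the ring back to index 0 */
	ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
		cpu_to_le32(QTN_BD_WRAP);		/* assumed flag name */

	return ret;
}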
294 static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
296 struct qtnf_pcie_bus_priv *priv = &ts->base;
306 rxbd = &ts->rx_bd_vbase[i];
321 txbd = &ts->tx_bd_vbase[i];
334 static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
338 struct qtnf_topaz_bda __iomem *bda = ts->bda;
339 struct qtnf_pcie_bus_priv *priv = &ts->base;
376 ret = topaz_alloc_bd_table(ts, bda);
382 ret = topaz_alloc_rx_buffers(ts);
391 static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
393 struct qtnf_pcie_bus_priv *priv = &ts->base;
404 tx_done_index = readl(ts->ep_next_rx_pkt);
415 txbd = &ts->tx_bd_vbase[i];
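TX reclaim walks the ring from the last reclaimed slot up to the completion index the endpoint publishes in the shared parameter block (ts->ep_next_rx_pkt), unmapping and freeing each completed skb. A condensed sketch; the lock, the tx_skb array, and the read-index fields are assumptions about the common qtnf_pcie_bus_priv layout:

static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	u32 tx_done_index;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	/* the EP reports how far it has consumed the TX ring */
	tx_done_index = readl(ts->ep_next_rx_pkt);
	i = priv->tx_bd_r_index;

	while (i != tx_done_index) {
		skb = priv->tx_skb[i];
		if (skb) {
			txbd = &ts->tx_bd_vbase[i];
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(txbd->addr),
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
		}

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}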
447 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
451 ts->base.tx_stopped = 1;
454 writel(0x0, ts->txqueue_wake);
461 TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
464 tasklet_hi_schedule(&ts->base.reclaim_tq);
469 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
472 ready = readl(ts->txqueue_wake);
478 TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
482 static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
484 struct qtnf_pcie_bus_priv *priv = &ts->base;
488 qtnf_topaz_data_tx_reclaim(ts);
503 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
504 struct qtnf_pcie_bus_priv *priv = &ts->base;
505 struct qtnf_topaz_bda __iomem *bda = ts->bda;
515 if (!qtnf_tx_queue_ready(ts)) {
532 txbd = &ts->tx_bd_vbase[i];
560 qtnf_topaz_data_tx_reclaim(ts);
568 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
569 struct qtnf_pcie_bus_priv *priv = &ts->base;
571 if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
575 qtnf_deassert_intx(ts);
583 disable_rx_irqs(ts);
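The interrupt handler covers both MSI and shared legacy INTx: in the INTx case it first checks whether this device actually asserted the line and de-asserts it, then masks RX completions and defers the RX work (the match at 583 shows disable_rx_irqs() called from this function). A sketch of that shape; the NAPI scheduling and the omission of the SHM IPC handling are assumptions:

static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	/* shared legacy INTx: bail out if this device did not assert it */
	if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
		return IRQ_NONE;

	priv->pcie_irq_count++;

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ts);

	/* mask RX completions and let the poll routine drain the ring */
	if (napi_schedule_prep(&bus->mux_napi)) {
		disable_rx_irqs(ts);
		__napi_schedule(&bus->mux_napi);
	}

	tasklet_hi_schedule(&priv->reclaim_tq);

	return IRQ_HANDLED;
}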
592 static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
594 u16 index = ts->base.rx_bd_r_index;
598 rxbd = &ts->rx_bd_vbase[index];
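Before polling, the driver peeks at the descriptor under the current read index: data is ready once the EP has cleared the QTN_BD_EMPTY flag it was handed in topaz_skb2rbd_attach(). A sketch consistent with the matched lines:

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
	u16 index = ts->base.rx_bd_r_index;
	struct qtnf_topaz_rx_bd *rxbd;
	u32 descw;

	rxbd = &ts->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	/* the EP clears the EMPTY bit once it has written a packet here */
	if (descw & QTN_BD_EMPTY)
		return 0;

	return 1;
}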
610 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
611 struct qtnf_pcie_bus_priv *priv = &ts->base;
626 if (!qtnf_rx_data_ready(ts))
630 rxbd = &ts->rx_bd_vbase[r_idx];
681 if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
698 ret = topaz_skb2rbd_attach(ts, w_idx,
713 enable_rx_irqs(ts);
722 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
725 tasklet_hi_schedule(&ts->base.reclaim_tq);
730 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
733 enable_rx_irqs(ts);
738 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
740 disable_rx_irqs(ts);
758 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
760 seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);
768 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
769 struct qtnf_pcie_bus_priv *priv = &ts->base;
770 u32 tx_done_index = readl(ts->ep_next_rx_pkt);
800 static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
802 struct qtnf_topaz_bda __iomem *bda = ts->bda;
811 static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
813 struct qtnf_topaz_bda __iomem *bda = ts->bda;
852 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
853 struct qtnf_topaz_bda __iomem *bda = ts->bda;
857 ret = qtnf_pcie_endian_detect(ts);
863 writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
864 qtnf_reset_dma_offset(ts);
869 if (ts->base.flashboot)
876 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
877 if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
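Firmware bring-up is driven by a boot-state byte in the BDA: the host writes its state with qtnf_set_state() and spins on the EP's response with qtnf_poll_state(), as in the HOST_RDY -> TARGET_RDY exchange matched above. A sketch of that pre-init step; the function name, the timeout constant, and the error handling are assumptions, and the flashboot-flag handling matched at 869 is omitted:

static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	int ret;

	ret = qtnf_pcie_endian_detect(ts);
	if (ret < 0)
		return ret;

	/* tell the EP whether the RC uses MSI, then reset DMA offsets */
	writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
	qtnf_reset_dma_offset(ts);

	/* handshake: announce host ready, wait for target ready */
	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {	/* assumed timeout */
		pr_err("EP did not reach TARGET_RDY state\n");
		return -EIO;
	}

	return 0;
}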
886 static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
888 struct pci_dev *pdev = ts->base.pdev;
890 setup_rx_irqs(ts);
891 disable_rx_irqs(ts);
893 if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
902 qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
904 struct qtnf_topaz_bda __iomem *bda = ts->bda;
905 struct pci_dev *pdev = ts->base.pdev;
919 blksize = ts->base.fw_blksize;
942 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
943 if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
960 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
961 if (qtnf_poll_state(&ts->bda->bda_bootstate,
977 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
978 if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
986 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
987 if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
1003 static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
1007 struct pci_dev *pdev = ts->base.pdev;
1010 if (qtnf_poll_state(&ts->bda->bda_bootstate,
1025 ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
1037 struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
1038 int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
1039 struct pci_dev *pdev = ts->base.pdev;
1042 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);
1045 ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
1053 qtnf_set_state(&ts->bda->bda_bootstate,
1057 if (ts->base.flashboot) {
1060 ret = qtnf_poll_state(&ts->bda->bda_bootstate,
1066 ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
1070 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
1071 ret = qtnf_poll_state(&ts->bda->bda_bootstate,
1079 qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
1080 ret = qtnf_poll_state(&ts->bda->bda_bootstate,
1089 ret = qtnf_post_init_ep(ts);
1110 struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);
1112 qtnf_topaz_data_tx_reclaim(ts);
1123 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
1124 struct pci_dev *pdev = ts->base.pdev;
1131 ts->bda = ts->base.epmem_bar;
1134 if (ts->base.msi_enabled)
1155 ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
1161 tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
1166 ipc_int.arg = ts;
1167 qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
1168 &ts->bda->bda_shm_reg2, &ipc_int);
1175 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
1177 qtnf_topaz_reset_ep(ts);
1178 qtnf_topaz_free_xfer_buffers(ts);
1184 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
1185 struct pci_dev *pdev = ts->base.pdev;
1187 writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
1190 TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
1201 struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
1202 struct pci_dev *pdev = ts->base.pdev;
1208 writel((u32 __force)PCI_D0, ts->ep_pmstate);
1211 TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
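Suspend and resume reuse the same shared-memory PM word and IPC doorbell: the host writes the target PCI power state into ts->ep_pmstate and rings the IPC4 interrupt so the EP firmware can act on it. A sketch of the suspend side; the doorbell word macro, the barrier, and the standard PCI PM calls at the end are assumptions, and polling for the EP's acknowledgement is omitted:

static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	/* ask the EP firmware to enter D3hot */
	writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
	dma_wmb();

	/* notify the EP via the IPC4 doorbell (IRQ word assumed) */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	pci_save_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 1);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}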
1220 struct qtnf_pcie_topaz_state *ts;
1222 bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
1226 ts = get_bus_priv(bus);
1227 ts->base.probe_cb = qtnf_pcie_topaz_probe;
1228 ts->base.remove_cb = qtnf_pcie_topaz_remove;
1229 ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
1231 ts->base.resume_cb = qtnf_pcie_topaz_resume;
1232 ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
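Finally, the bus allocation routine embeds the Topaz state behind struct qtnf_bus in a single devm allocation and wires the Topaz-specific callbacks into the common PCIe core, as the matched lines around 1220-1232 show. A sketch of that tail end; the function name and the NULL-on-failure return are assumptions:

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_topaz_state *ts;

	/* bus descriptor and Topaz private state share one allocation */
	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
	if (!bus)
		return NULL;

	ts = get_bus_priv(bus);
	ts->base.probe_cb = qtnf_pcie_topaz_probe;
	ts->base.remove_cb = qtnf_pcie_topaz_remove;
	ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
	ts->base.resume_cb = qtnf_pcie_topaz_resume;
	ts->base.suspend_cb = qtnf_pcie_topaz_suspend;

	return bus;
}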