Lines matching defs:cl. These are the definition and use sites of the per-cluster pointer cl (a struct mca_cluster *), apparently from the Apple MCA ASoC driver (sound/soc/apple/mca.c). The number before each match is its line number in that file.

65 #define PORT_DATA_SEL_TXA(cl)	(1 << ((cl)*2))
66 #define PORT_DATA_SEL_TXB(cl)	(2 << ((cl)*2))
111 #define REG_DMA_ADAPTER_A(cl)	(0x8000 * (cl))
112 #define REG_DMA_ADAPTER_B(cl)	(0x8000 * (cl) + 0x4000)
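
The two PORT_DATA_SEL macros pack a 2-bit transmit-source selector per cluster into one register, and the REG_DMA_ADAPTER macros place each cluster's DMA adapters at a fixed 0x8000 stride, with adapter B offset 0x4000 into it. A worked example of the arithmetic:

    /* For cluster cl = 2:
     *   PORT_DATA_SEL_TXA(2) == 1 << 4 == 0x10
     *   PORT_DATA_SEL_TXB(2) == 2 << 4 == 0x20
     *   REG_DMA_ADAPTER_A(2) == 0x8000 * 2          == 0x10000
     *   REG_DMA_ADAPTER_B(2) == 0x8000 * 2 + 0x4000 == 0x14000
     */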
167 static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
169 __iomem void *ptr = cl->base + regoffset;
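
Only the prototype (line 167) and the pointer computation (line 169) of mca_modify() match here. Given the (mask, val) parameters, a plausible reconstruction is the usual read-modify-write helper; a sketch, not necessarily the verbatim driver body:

    static void mca_modify(struct mca_cluster *cl, int regoffset,
                           u32 mask, u32 val)
    {
            __iomem void *ptr = cl->base + regoffset;
            u32 newval;

            /* Replace only the bits under mask; leave the rest intact. */
            newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
            writel_relaxed(newval, ptr);
    }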
195 struct mca_cluster *cl = mca_dai_to_cluster(dai);
205 mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
207 mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
209 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
217 WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
219 mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
221 mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
222 FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
232 struct mca_cluster *cl = mca_dai_to_cluster(dai);
240 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
248 mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
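
Lines 205-248 toggle SERDES_CONF_SYNC_SEL around enabling and disabling the SERDES unit, ending with the field set to cl->no + 1 via FIELD_PREP() (line 222). FIELD_PREP() comes from <linux/bitfield.h> and shifts a value into the position given by a contiguous mask; a minimal sketch, with a made-up mask position since the real define is not among the matches:

    #include <linux/bitfield.h>

    #define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)	/* hypothetical placement */

    /* FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1) shifts cl->no + 1 into
     * the masked bits; mca_modify() then ORs it in under the same mask.
     */
    u32 val = FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);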
259 static int mca_fe_enable_clocks(struct mca_cluster *cl)
261 struct mca_data *mca = cl->host;
264 ret = clk_prepare_enable(cl->clk_parent);
268 cl->no, ret);
277 cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
280 if (!cl->pd_link) {
282 "cluster %d: unable to prop-up power domain\n", cl->no);
283 clk_disable_unprepare(cl->clk_parent);
287 writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
288 mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
290 mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);
295 static void mca_fe_disable_clocks(struct mca_cluster *cl)
297 mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
298 mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);
300 device_link_del(cl->pd_link);
301 clk_disable_unprepare(cl->clk_parent);
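
mca_fe_enable_clocks() (lines 259-290) prepares the cluster's parent clock, props up its power domain with a device link, selects the cluster's MCLK and enables the sync generator; mca_fe_disable_clocks() (lines 295-301) unwinds the same steps. A sketch of the enable-side error handling implied by lines 264-283; the message text, device-link flags and return code are assumptions:

    ret = clk_prepare_enable(cl->clk_parent);
    if (ret) {
            dev_err(mca->dev, "cluster %d: unable to enable parent clock: %d\n",
                    cl->no, ret);
            return ret;
    }

    /* Keep the power domain up while the clocks run (flags assumed). */
    cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
                                  DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
                                  DL_FLAG_RPM_ACTIVE);
    if (!cl->pd_link) {
            dev_err(mca->dev,
                    "cluster %d: unable to prop-up power domain\n", cl->no);
            clk_disable_unprepare(cl->clk_parent);
            return -EINVAL;
    }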
304 static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
306 struct mca_data *mca = cl->host;
314 if (be_cl->port_driver != cl->no)
331 struct mca_cluster *cl = mca_dai_to_cluster(dai);
332 struct mca_data *mca = cl->host;
336 if (cl->port_driver < 0)
339 fe_cl = &mca->clusters[cl->port_driver];
353 cl->clocks_in_use[substream->stream] = true;
361 struct mca_cluster *cl = mca_dai_to_cluster(dai);
362 struct mca_data *mca = cl->host;
365 if (cl->port_driver < 0)
373 fe_cl = &mca->clusters[cl->port_driver];
377 cl->clocks_in_use[substream->stream] = false;
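
mca_fe_clocks_in_use() (line 304) keeps an FE's clocks alive while any back-end cluster still routes through it: the BE prepare and hw_free paths set and clear clocks_in_use[] (lines 353 and 377), and the FE scans for any BE whose port_driver points back at it (line 314). A sketch of that scan, assuming an nclusters count in struct mca_data and the for_each_pcm_streams() helper from <sound/pcm.h>:

    static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
    {
            struct mca_data *mca = cl->host;
            struct mca_cluster *be_cl;
            int stream, i;

            for (i = 0; i < mca->nclusters; i++) {
                    be_cl = &mca->clusters[i];

                    /* Only BEs driven by this FE's port matter. */
                    if (be_cl->port_driver != cl->no)
                            continue;

                    for_each_pcm_streams(stream)
                            if (be_cl->clocks_in_use[stream])
                                    return true;
            }

            return false;
    }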
393 static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
397 __iomem void *serdes_base = cl->base + serdes_unit;
420 serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);
435 mca_modify(cl,
461 dev_err(cl->host->dev,
470 struct mca_cluster *cl = mca_dai_to_cluster(dai);
472 cl->tdm_slots = slots;
473 cl->tdm_slot_width = slot_width;
474 cl->tdm_tx_mask = tx_mask;
475 cl->tdm_rx_mask = rx_mask;
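
Lines 470-475 are the set_tdm_slot DAI callback: it only records the TDM geometry, which hw_params reads back at lines 589-591. With the standard snd_soc_dai_ops signature filled in (the function name is inferred):

    static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
                                   unsigned int rx_mask, int slots, int slot_width)
    {
            struct mca_cluster *cl = mca_dai_to_cluster(dai);

            cl->tdm_slots = slots;
            cl->tdm_slot_width = slot_width;
            cl->tdm_tx_mask = tx_mask;
            cl->tdm_rx_mask = rx_mask;

            return 0;
    }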
482 struct mca_cluster *cl = mca_dai_to_cluster(dai);
483 struct mca_data *mca = cl->host;
522 mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
524 mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
527 cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
529 cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);
540 struct mca_cluster *cl = mca_dai_to_cluster(dai);
542 cl->bclk_ratio = ratio;
569 struct mca_cluster *cl = mca_dai_to_cluster(dai);
570 struct mca_data *mca = cl->host;
580 if (!cl->tdm_slot_width) {
589 tdm_slot_width = cl->tdm_slot_width;
590 tdm_slots = cl->tdm_slots;
591 tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
594 if (cl->bclk_ratio)
595 bclk_ratio = cl->bclk_ratio;
631 ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
650 writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
654 mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
657 mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
660 if (!mca_fe_clocks_in_use(cl)) {
665 cl->base + REG_SYNCGEN_HI_PERIOD);
667 cl->base + REG_SYNCGEN_LO_PERIOD);
669 cl->base + REG_MCLK_CONF);
671 ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
674 cl->no, ret);
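
Within hw_params, an explicit ratio from set_bclk_ratio (line 542) wins at lines 594-595; the natural fallback is the TDM geometry, and line 671 then derives the parent clock rate from it. A worked sketch, assuming the fallback is slots times slot width:

    if (cl->bclk_ratio)
            bclk_ratio = cl->bclk_ratio;
    else
            bclk_ratio = tdm_slots * tdm_slot_width;	/* assumed fallback */

    /* E.g. 2 slots of 32 bits at 48000 Hz: 2 * 32 * 48000 = 3072000 Hz. */
    ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);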
690 static bool mca_be_started(struct mca_cluster *cl)
695 if (cl->port_started[stream])
705 struct mca_cluster *cl = mca_dai_to_cluster(dai);
707 struct mca_data *mca = cl->host;
726 if (mca_be_started(cl)) {
732 if (cl->port_driver != fe_cl->no)
735 cl->port_started[substream->stream] = true;
740 cl->base + REG_PORT_ENABLES);
742 cl->base + REG_PORT_CLOCK_SEL);
744 cl->base + REG_PORT_DATA_SEL);
746 cl->port_driver = fe_cl->no;
748 cl->port_started[substream->stream] = true;
756 struct mca_cluster *cl = mca_dai_to_cluster(dai);
757 struct mca_data *mca = cl->host;
759 cl->port_started[substream->stream] = false;
761 if (!mca_be_started(cl)) {
766 writel_relaxed(0, cl->base + REG_PORT_ENABLES);
767 writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
769 cl->port_driver = -1;
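
Lines 690-769 track BE port ownership: startup claims the port for the driving FE (cl->port_driver = fe_cl->no, line 746) and rejects a mismatched second FE (line 732), while shutdown releases the port once neither stream is started (line 769). The predicate on line 690 reduces to a scan over both streams; a sketch assuming for_each_pcm_streams():

    static bool mca_be_started(struct mca_cluster *cl)
    {
            int stream;

            for_each_pcm_streams(stream)
                    if (cl->port_started[stream])
                            return true;

            return false;
    }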
815 struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
816 struct dma_chan *chan = cl->dma_chans[substream->stream];
896 static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream)
900 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
901 is_tx ? "tx%da" : "rx%da", cl->no);
903 char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
904 is_tx ? "tx%da" : "rx%db", cl->no);
906 return of_dma_request_slave_channel(cl->host->dev->of_node, name);
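
mca_request_dma_channel() (lines 896-906) builds a per-cluster, per-direction channel name and resolves it against the device tree; the rx%da/rx%db variants (lines 901 and 904) pick DMA adapter A or B for capture, evidently under a compile-time switch. A worked example of the naming:

    /* For cluster cl->no == 1:
     *   playback: devm_kasprintf(..., "tx%da", 1) -> "tx1a"
     *   capture:  "rx1a", or "rx1b" in the adapter-B build variant
     * of_dma_request_slave_channel() then looks the name up in dma-names.
     */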
914 struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
924 if (!substream || !cl->dma_chans[i])
927 dma_release_channel(cl->dma_chans[i]);
928 cl->dma_chans[i] = NULL;
936 struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
950 chan = mca_request_dma_channel(cl, i);
959 i, cl->no, chan);
966 cl->dma_chans[i] = chan;
991 struct mca_cluster *cl = &mca->clusters[i];
993 if (!IS_ERR_OR_NULL(cl->clk_parent))
994 clk_put(cl->clk_parent);
996 if (!IS_ERR_OR_NULL(cl->pd_dev))
997 dev_pm_domain_detach(cl->pd_dev, true);
1068 struct mca_cluster *cl = &clusters[i];
1073 cl->host = mca;
1074 cl->no = i;
1075 cl->base = base + CLUSTER_STRIDE * i;
1076 cl->port_driver = -1;
1077 cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
1078 if (IS_ERR(cl->clk_parent)) {
1080 i, PTR_ERR(cl->clk_parent));
1081 ret = PTR_ERR(cl->clk_parent);
1084 cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
1085 if (IS_ERR(cl->pd_dev)) {
1088 PTR_ERR(cl->pd_dev));
1089 ret = PTR_ERR(cl->pd_dev);
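
The probe loop (lines 1068-1089) initializes each cluster in turn: host back-pointer, index, MMIO base at a fixed CLUSTER_STRIDE, port_driver marked unowned (-1), a per-cluster parent clock taken by index from the DT, and a power domain attached at index i + 1 (index 0 presumably belongs to the block as a whole); the release path at lines 991-997 undoes both with clk_put() and dev_pm_domain_detach(). A sketch of the probe error paths; the message text and the goto label are placeholders:

    cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
    if (IS_ERR(cl->clk_parent)) {
            dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
                    i, PTR_ERR(cl->clk_parent));
            ret = PTR_ERR(cl->clk_parent);
            goto err_release;	/* placeholder label */
    }

    cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
    if (IS_ERR(cl->pd_dev)) {
            dev_err(&pdev->dev, "unable to obtain power domain %d: %ld\n",
                    i, PTR_ERR(cl->pd_dev));
            ret = PTR_ERR(cl->pd_dev);
            goto err_release;	/* placeholder label */
    }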