Lines Matching refs:pd

22 * @pd: PRCI context
26 * address of the PRCI register target described by @pd, and return
31 * Return: the contents of the register described by @pd and @offs.
33 static u32 __prci_readl(struct __prci_data *pd, u32 offs)
35 return readl_relaxed(pd->va + offs);
38 static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
40 writel_relaxed(v, pd->va + offs);
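
These first hits are the low-level accessors everything else in the listing is built on: every PRCI register access goes through the ioremapped base held in struct __prci_data. A minimal sketch of that pattern, assuming only the ->va member that the hits show (the driver's real struct carries more fields):

#include <linux/io.h>

/* Sketch only: the driver's struct __prci_data has additional members. */
struct __prci_data {
	void __iomem *va;	/* ioremapped PRCI register base */
};

/* Read the PRCI register at byte offset @offs from the mapped base. */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl_relaxed(pd->va + offs);
}

/* Write @v to the PRCI register at byte offset @offs. */
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel_relaxed(v, pd->va + offs);
}
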
118 * @pd: PRCI context
122 * the PRCI identified by @pd, and store it into the local configuration
126 * @pd and @pwd from changing during execution.
128 static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
131 __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
136 * @pd: PRCI context
146 * @pd and @pwd from changing during execution.
148 static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
152 __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
160 * @pd: PRCI context
164 static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
168 __prci_writel(enable, pwd->cfg1_offs, pd);
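
The next cluster (lines 118-168) is the per-PLL glue: each WRPLL gets a struct __prci_wrpll_data holding its register offsets, a cached configuration, and bypass callbacks, and the cfg0/cfg1 helpers shuttle that state to and from hardware. A hedged reconstruction around the calls shown above; only members that actually appear in the hits are spelled out, and struct wrpll_cfg plus the pack/unpack helpers are assumed to come from the driver's WRPLL support code:

/* Per-WRPLL bookkeeping; only members referenced in the hits are shown. */
struct __prci_wrpll_data {
	struct wrpll_cfg c;			/* cached PLL configuration */
	void (*enable_bypass)(struct __prci_data *pd);
	void (*disable_bypass)(struct __prci_data *pd);
	u32 cfg0_offs;				/* CFG0 register offset in the PRCI */
	u32 cfg1_offs;				/* CFG1 register offset in the PRCI */
};

/* Cache the live CFG0 contents into @pwd->c. */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/* Program CFG0 from a packed struct wrpll_cfg. */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
}

/* Write the CFG1 enable bits directly. */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}
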
207 struct __prci_data *pd = pc->pd;
215 pwd->enable_bypass(pd);
217 __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
228 struct __prci_data *pd = pc->pd;
231 r = __prci_readl(pd, pwd->cfg1_offs);
243 struct __prci_data *pd = pc->pd;
248 __prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
251 pwd->disable_bypass(pd);
260 struct __prci_data *pd = pc->pd;
264 pwd->enable_bypass(pd);
266 r = __prci_readl(pd, pwd->cfg1_offs);
269 __prci_wrpll_write_cfg1(pd, pwd, r);
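
Lines 207-269 thread @pd through the clk_ops callbacks, and read together they give the gating sequence: the CKE bit in CFG1 switches the PLL output, and the bypass callbacks keep downstream logic running from HFCLK while the PLL is reprogrammed or stopped. A hedged sketch of that enable/disable pair; the helper names here are invented for illustration (the driver does this inside its clk_hw callbacks), and PRCI_COREPLLCFG1_CKE_MASK is taken from the hit at line 248:

/* Hypothetical helpers mirroring the enable/disable sequence in the hits. */

/* Enable: assert CKE in CFG1, then route consumers off the bypass path. */
static void __prci_wrpll_clock_enable(struct __prci_data *pd,
				      struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);
}

/* Disable: bypass first so consumers stay clocked, then clear CKE. */
static void __prci_wrpll_clock_disable(struct __prci_data *pd,
				       struct __prci_wrpll_data *pwd)
{
	u32 r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	r = __prci_readl(pd, pwd->cfg1_offs);
	r &= ~PRCI_COREPLLCFG1_CKE_MASK;
	__prci_wrpll_write_cfg1(pd, pwd, r);
}
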
278 struct __prci_data *pd = pc->pd;
282 v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
295 struct __prci_data *pd = pc->pd;
296 u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
307 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
314 void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
318 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
320 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
322 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
328 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
335 void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
339 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
341 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
343 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
349 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
357 void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
361 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
363 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
365 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
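
From line 314 on, the hits are the glitchless mux helpers, and they all share one shape: read the select register, flip a bit, write it back, then read it once more purely as a posting barrier before returning. A sketch of that pattern for the CORECLK mux; the select-bit mask name and its polarity (set = HFCLK) are assumptions, since the listing only shows the register offset:

/* Assumed select-bit mask; only PRCI_CORECLKSEL_OFFSET appears in the hits. */
#define PRCI_CORECLKSEL_SEL_MASK	BIT(0)

/* Park CORECLK on the fixed HFCLK input, e.g. around PLL reprogramming. */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_SEL_MASK;			/* select HFCLK (assumed polarity) */
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}

/* Switch back to the PLL output: same read-modify-write, opposite bit. */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_SEL_MASK;			/* select COREPLL (assumed polarity) */
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier */
}
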
371 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
378 void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
382 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
384 __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
386 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
392 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
399 void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
403 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
405 __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
407 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
413 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
420 void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
424 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
426 __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
428 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
434 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
441 void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
445 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
447 __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
449 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
456 struct __prci_data *pd = pc->pd;
459 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
470 struct __prci_data *pd = pc->pd;
476 __prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
477 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
485 struct __prci_data *pd = pc->pd;
488 __prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
489 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
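
The PCIe AUX clock (lines 456-489) is the simplest consumer of @pd: a gate at PRCI_PCIE_AUX_OFFSET, with the usual read-back barrier after each write. A sketch of that gate; the helper names are invented here (the driver performs these steps in its clk_hw callbacks), and treating bit 0 as the enable bit is an assumption:

/* Query the PCIe AUX gate; bit 0 as the enable bit is an assumption. */
static int __prci_pcie_aux_is_enabled(struct __prci_data *pd)
{
	return !!(__prci_readl(pd, PRCI_PCIE_AUX_OFFSET) & BIT(0));
}

/* Open the gate: write 1, then read back so the write is posted. */
static void __prci_pcie_aux_enable(struct __prci_data *pd)
{
	u32 r;

	__prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier */
}

/* Close the gate: write 0, then the same barrier read. */
static void __prci_pcie_aux_disable(struct __prci_data *pd)
{
	u32 r;

	__prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier */
}
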
496 * @pd: pointer to the PRCI per-device instance data
504 static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
528 pic->pd = pd;
531 __prci_wrpll_read_cfg0(pd, pic->pwd);
547 pd->hw_clks.hws[i] = &pic->hw;
550 pd->hw_clks.num = i;
553 &pd->hw_clks);
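
Lines 504-553 show how @pd is wired into the common clock framework: every driver-private clock gets a back-pointer to its PRCI instance, PLL-backed clocks have their cached configuration primed from CFG0, and the resulting clk_hw pointers are collected in pd->hw_clks so a single onecell provider can serve them. A condensed, hedged sketch of that loop; the prci_clk_desc layout and the clk_init_data setup are assumptions, and error reporting is omitted:

static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
				  const struct prci_clk_desc *desc)
{
	struct __prci_clock *pic;
	int i, r;

	for (i = 0; i < desc->num_clks; ++i) {
		pic = &desc->clks[i];

		/* clk_init_data setup (name, parent, ops) omitted for brevity */

		pic->pd = pd;			/* back-pointer used by the clk_ops above */
		if (pic->pwd)
			__prci_wrpll_read_cfg0(pd, pic->pwd);	/* seed cached PLL config */

		r = devm_clk_hw_register(dev, &pic->hw);
		if (r)
			return r;

		pd->hw_clks.hws[i] = &pic->hw;	/* collect for the onecell provider */
	}

	pd->hw_clks.num = i;

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   &pd->hw_clks);
}
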
571 struct __prci_data *pd;
577 pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
578 if (!pd)
581 pd->va = devm_platform_ioremap_resource(pdev, 0);
582 if (IS_ERR(pd->va))
583 return PTR_ERR(pd->va);
585 pd->reset.rcdev.owner = THIS_MODULE;
586 pd->reset.rcdev.nr_resets = PRCI_RST_NR;
587 pd->reset.rcdev.ops = &reset_simple_ops;
588 pd->reset.rcdev.of_node = pdev->dev.of_node;
589 pd->reset.active_low = true;
590 pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
591 spin_lock_init(&pd->reset.lock);
593 r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
598 r = __prci_register_clocks(dev, pd, desc);
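
Finally, the probe hits (lines 571-598) show where @pd comes from: one flexible allocation sized for the clk_hw array, the ioremapped register window, a reset-simple controller layered over DEVICESRESETREG, and then the clock registration above. A condensed sketch of that sequence; fetching desc from the device match data is an assumption, and error messages are omitted:

static int sifive_prci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct prci_clk_desc *desc;
	struct __prci_data *pd;
	int r;

	desc = of_device_get_match_data(dev);	/* per-SoC clock description (assumed) */

	/* One allocation carries the instance data plus the clk_hw array. */
	pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks),
			  GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->va = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

	/* Expose DEVICESRESETREG through the generic reset-simple helper. */
	pd->reset.rcdev.owner = THIS_MODULE;
	pd->reset.rcdev.nr_resets = PRCI_RST_NR;
	pd->reset.rcdev.ops = &reset_simple_ops;
	pd->reset.rcdev.of_node = pdev->dev.of_node;
	pd->reset.active_low = true;
	pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
	spin_lock_init(&pd->reset.lock);

	r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
	if (r)
		return r;

	return __prci_register_clocks(dev, pd, desc);
}
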