Lines matching refs: qp

56 struct qcom_icc_provider *qp = to_qcom_provider(provider);
61 rc = regmap_update_bits(qp->regmap,
62 qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
68 return regmap_update_bits(qp->regmap,
69 qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
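
The fragments above (file lines 56–69) appear to come from the Qualcomm SMD-RPM interconnect provider and show the driver's basic register-programming pattern: recover the provider-private struct qcom_icc_provider from the generic icc provider with container_of(), then update a per-port QoS register through regmap_update_bits() at qos_offset plus a port-indexed address macro. A minimal sketch of that pattern follows, assuming the struct layouts from the driver's private header; only QNOC_QOS_MCTL_LOWn_ADDR and the qos_port field are taken from the excerpt, while the mask/shift macro names and the areq_prio/urg_fwd_en fields are illustrative stand-ins.

    #include <linux/container_of.h>
    #include <linux/regmap.h>
    #include <linux/interconnect-provider.h>

    /* The embedded 'provider' member (see file line 548, provider = &qp->provider)
     * lets container_of() map the generic handle back to the driver state. */
    #define to_qcom_provider(_provider) \
    	container_of(_provider, struct qcom_icc_provider, provider)

    /* Sketch only: mask/shift names and qos fields other than qos_port
     * are illustrative, not copied from the driver. */
    static int example_set_qnoc_qos(struct icc_node *src)
    {
    	struct qcom_icc_provider *qp = to_qcom_provider(src->provider);
    	struct qcom_icc_node *qn = src->data;
    	struct qcom_icc_qos *qos = &qn->qos;
    	int rc;

    	/* Program the default priority field of MCTL_LOWn for this port. */
    	rc = regmap_update_bits(qp->regmap,
    				qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
    				QNOC_QOS_MCTL_DFLT_PRIO_MASK,
    				qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
    	if (rc)
    		return rc;

    	/* Then the urgency-forwarding enable bit in the same register. */
    	return regmap_update_bits(qp->regmap,
    				  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
    				  QNOC_QOS_MCTL_URGFWD_EN_MASK,
    				  !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
    }
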
74 static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
93 return regmap_update_bits(qp->regmap,
94 qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
100 struct qcom_icc_provider *qp;
109 qp = to_qcom_provider(provider);
119 rc = qcom_icc_bimc_set_qos_health(qp,
129 return regmap_update_bits(qp->regmap,
130 qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
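
File lines 74–130 show the BIMC flavour of the same pattern: a helper programs one of the bandwidth-keeper (BKE) health registers for a master port, and the caller then sets the BKE enable bit. A compressed sketch, reusing the headers and the to_qcom_provider() helper from the previous block; M_BKE_HEALTH_CFG_MASK, M_BKE_EN_EN_BMASK, NUM_BKE_HEALTH_LEVELS and the prio_level field are assumed names, while the two address macros come straight from the excerpt.

    /* Sketch: BIMC QoS programming, health registers first, then BKE enable. */
    static int example_bimc_set_qos_health(struct qcom_icc_provider *qp,
    				       struct qcom_icc_qos *qos, int regnum)
    {
    	return regmap_update_bits(qp->regmap,
    				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
    				  M_BKE_HEALTH_CFG_MASK, qos->prio_level);
    }

    static int example_set_bimc_qos(struct icc_node *src)
    {
    	struct qcom_icc_provider *qp = to_qcom_provider(src->provider);
    	struct qcom_icc_node *qn = src->data;
    	int i, rc;

    	/* One HEALTH_CFG register per health level for this port... */
    	for (i = 0; i < NUM_BKE_HEALTH_LEVELS; i++) {
    		rc = example_bimc_set_qos_health(qp, &qn->qos, i);
    		if (rc)
    			return rc;
    	}

    	/* ...then turn the bandwidth keeper on. */
    	return regmap_update_bits(qp->regmap,
    				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
    				  M_BKE_EN_EN_BMASK, 1);
    }
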
134 static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
142 rc = regmap_update_bits(qp->regmap,
143 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
148 return regmap_update_bits(qp->regmap,
149 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
155 struct qcom_icc_provider *qp;
163 qp = to_qcom_provider(provider);
175 rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
185 return regmap_update_bits(qp->regmap,
186 qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
192 struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
197 switch (qp->type) {
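
File lines 192–197 show how the block-specific handlers are selected: the shared QoS entry point recovers qp from node->provider and switches on qp->type, which probe copies from the per-SoC descriptor (file line 512). A sketch of that dispatch, reusing the functions above; the QCOM_ICC_BIMC enum constant is an assumed name.

    /* Sketch: dispatch QoS programming on the provider's bus type. */
    static int example_qos_set(struct icc_node *node)
    {
    	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);

    	switch (qp->type) {
    	case QCOM_ICC_BIMC:
    		return example_set_bimc_qos(node);
    	default:
    		/* NoC-style buses program priority/mode registers instead;
    		 * the QNOC variant above stands in for them here. */
    		return example_set_qnoc_qos(node);
    	}
    }
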
294 static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
327 struct qcom_icc_provider *qp = to_qcom_provider(provider);
340 qcom_icc_calc_rate(qp, qn, ctx));
350 struct qcom_icc_provider *qp;
358 qp = to_qcom_provider(provider);
375 if (!qp->bus_clk_desc && !qp->bus_clk)
382 if (qp->keep_alive)
386 if (qp->bus_clk) {
390 return clk_set_rate(qp->bus_clk, active_rate);
397 if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
398 ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
404 qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
407 if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
408 ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
414 qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
421 active_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_ACTIVE_STATE);
422 sleep_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_SLEEP_STATE);
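
File lines 294–422 cover the bandwidth-to-rate path: qcom_icc_calc_rate() turns the aggregated votes into a clock rate per RPM context, and qcom_icc_set() pushes the active and sleep rates either to a locally controlled "bus" clock or, through qcom_icc_rpm_set_bus_rate(), as RPM votes, caching the last written value to skip redundant updates. A sketch of the rate-programming half; that the helper's third argument is the rate is inferred from the excerpt, and the error handling is illustrative.

    #include <linux/clk.h>

    /* Sketch: write the aggregated rates for both RPM contexts. */
    static int example_set_rates(struct qcom_icc_provider *qp,
    			     u64 active_rate, u64 sleep_rate)
    {
    	int ret;

    	/* Providers with their own clock only honour the active rate. */
    	if (qp->bus_clk)
    		return clk_set_rate(qp->bus_clk, active_rate);

    	if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
    		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc,
    						QCOM_SMD_RPM_ACTIVE_STATE,
    						active_rate);
    		if (ret)
    			return ret;
    		/* Cache so an unchanged vote is not re-sent to the RPM. */
    		qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
    	}

    	if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
    		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc,
    						QCOM_SMD_RPM_SLEEP_STATE,
    						sleep_rate);
    		if (ret)
    			return ret;
    		qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
    	}

    	return 0;
    }
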
454 struct qcom_icc_provider *qp;
480 qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
481 if (!qp)
484 qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
485 if (!qp->intf_clks)
489 qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
491 if (!qp->bus_clk_desc)
494 qp->bus_clk_desc = desc->bus_clk_desc;
497 qp->bus_clk = devm_clk_get_optional(dev, "bus");
498 if (IS_ERR(qp->bus_clk))
499 return PTR_ERR(qp->bus_clk);
507 qp->num_intf_clks = cd_num;
509 qp->intf_clks[i].id = cds[i];
511 qp->keep_alive = desc->keep_alive;
512 qp->type = desc->type;
513 qp->qos_offset = desc->qos_offset;
522 qp->regmap = dev_get_regmap(dev->parent, NULL);
523 if (qp->regmap)
532 qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
533 if (IS_ERR(qp->regmap)) {
535 return PTR_ERR(qp->regmap);
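
Probe lines 522–535 show the regmap being acquired with a fallback: reuse a regmap already registered by the parent device (the usual case when the NoC sits under a wrapper device that maps the registers), otherwise map this device's own register window and build an MMIO regmap from the descriptor's regmap_config. A sketch of that fallback, assuming devm_platform_ioremap_resource() as the way the mmio pointer is obtained.

    #include <linux/platform_device.h>

    /* Sketch: parent regmap if available, otherwise our own MMIO regmap. */
    static int example_get_regmap(struct platform_device *pdev,
    			      struct qcom_icc_provider *qp,
    			      const struct regmap_config *cfg)
    {
    	struct device *dev = &pdev->dev;
    	void __iomem *mmio;

    	qp->regmap = dev_get_regmap(dev->parent, NULL);
    	if (qp->regmap)
    		return 0;

    	mmio = devm_platform_ioremap_resource(pdev, 0);
    	if (IS_ERR(mmio))
    		return PTR_ERR(mmio);

    	qp->regmap = devm_regmap_init_mmio(dev, mmio, cfg);
    	return PTR_ERR_OR_ZERO(qp->regmap);
    }
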
540 ret = clk_prepare_enable(qp->bus_clk);
544 ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
548 provider = &qp->provider;
559 ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
567 qnodes[i]->ab_coeff = qp->ab_coeff;
570 qnodes[i]->ib_coeff = qp->ib_coeff;
574 clk_bulk_disable_unprepare(qp->num_intf_clks,
575 qp->intf_clks);
592 clk_bulk_disable_unprepare(qp->num_intf_clks,
593 qp->intf_clks);
602 clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
608 platform_set_drvdata(pdev, qp);
624 clk_disable_unprepare(qp->bus_clk);
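
The clock fragments in probe (file lines 540–624) suggest the bus clock is prepared once and left running for the provider's lifetime, while the bulk-acquired interface clocks are only enabled around node creation and QoS programming and then dropped again, including on the error paths. A sketch of that window; the exact placement of the enable/disable calls relative to node registration is inferred from the excerpt.

    /* Sketch: bus clock stays on, interface clocks only gate QoS programming. */
    static int example_probe_clocks(struct device *dev, struct qcom_icc_provider *qp)
    {
    	int ret;

    	ret = clk_prepare_enable(qp->bus_clk);		/* undone in remove() */
    	if (ret)
    		return ret;

    	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
    	if (ret)
    		goto err_disable_bus;

    	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
    	if (ret)
    		goto err_disable_bus;

    	/* ... caller creates icc nodes and programs QoS registers here ... */

    	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
    	return 0;

    err_disable_bus:
    	clk_disable_unprepare(qp->bus_clk);
    	return ret;
    }
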
632 struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
634 icc_provider_deregister(&qp->provider);
635 icc_nodes_remove(&qp->provider);
636 clk_disable_unprepare(qp->bus_clk);
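
The final fragments (file lines 632–636) show teardown as the mirror image of probe: unregister the provider so no new requests arrive, free the node graph, then drop the bus clock that probe enabled. A short sketch; the void return type follows current platform_driver convention and is an assumption about this driver's version.

    /* Sketch: remove() undoes what probe() set up. */
    static void example_remove(struct platform_device *pdev)
    {
    	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

    	icc_provider_deregister(&qp->provider);	/* stop incoming requests */
    	icc_nodes_remove(&qp->provider);	/* free the node graph */
    	clk_disable_unprepare(qp->bus_clk);	/* pairs with probe's clk_prepare_enable() */
    }
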