Lines matching refs:ctx — references to the driver context (struct xgene_hwmon_dev *ctx) in the X-Gene SoC hwmon driver

126 static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
128 struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
133 mutex_lock(&ctx->rd_mutex);
134 init_completion(&ctx->rd_complete);
135 ctx->resp_pending = true;
139 cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));
155 rc = mbox_send_message(ctx->mbox_chan, msg);
157 dev_err(ctx->dev, "Mailbox send error %d\n", rc);
160 if (!wait_for_completion_timeout(&ctx->rd_complete,
161 usecs_to_jiffies(ctx->usecs_lat))) {
162 dev_err(ctx->dev, "Mailbox operation timed out\n");
168 if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
173 msg[0] = ctx->sync_msg.msg;
174 msg[1] = ctx->sync_msg.param1;
175 msg[2] = ctx->sync_msg.param2;
178 mbox_chan_txdone(ctx->mbox_chan, 0);
179 ctx->resp_pending = false;
180 mutex_unlock(&ctx->rd_mutex);
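
The fragments above (lines 126-180) outline the synchronous PCC read path: take the read mutex, arm a completion, write the request into the PCC shared memory, ring the doorbell with mbox_send_message(), and wait with a timeout before copying the reply out of ctx->sync_msg. A minimal sketch of that shape, reusing the driver's own fields and macros from the listing; the helper name, the shared-memory write (elided to a comment), and the exact error codes are assumptions:

static int pcc_rd_sketch(struct xgene_hwmon_dev *ctx, u32 *msg)
{
        int rc;

        mutex_lock(&ctx->rd_mutex);
        init_completion(&ctx->rd_complete);
        ctx->resp_pending = true;

        /* ... write signature, command and msg[] into ctx->pcc_comm_addr ... */

        rc = mbox_send_message(ctx->mbox_chan, msg);
        if (rc < 0) {
                dev_err(ctx->dev, "Mailbox send error %d\n", rc);
                goto err;
        }
        if (!wait_for_completion_timeout(&ctx->rd_complete,
                                         usecs_to_jiffies(ctx->usecs_lat))) {
                dev_err(ctx->dev, "Mailbox operation timed out\n");
                rc = -ETIMEDOUT;
                goto err;
        }

        /* The rx callback copied the reply into ctx->sync_msg before
         * completing ctx->rd_complete. */
        if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
                rc = -EINVAL;
                goto err;
        }
        msg[0] = ctx->sync_msg.msg;
        msg[1] = ctx->sync_msg.param1;
        msg[2] = ctx->sync_msg.param2;
        rc = 0;
err:
        /* The PCC channel has no tx-done interrupt, so ack the transfer
         * by hand before releasing the read lock. */
        mbox_chan_txdone(ctx->mbox_chan, 0);
        ctx->resp_pending = false;
        mutex_unlock(&ctx->rd_mutex);
        return rc;
}

The DT-mailbox variant at lines 184-217 follows the same lock/complete/wait pattern, only without the shared-memory write and the manual txdone acknowledgement.
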
184 static int xgene_hwmon_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
188 mutex_lock(&ctx->rd_mutex);
189 init_completion(&ctx->rd_complete);
190 ctx->resp_pending = true;
192 rc = mbox_send_message(ctx->mbox_chan, msg);
194 dev_err(ctx->dev, "Mailbox send error %d\n", rc);
198 if (!wait_for_completion_timeout(&ctx->rd_complete,
200 dev_err(ctx->dev, "Mailbox operation timed out\n");
206 if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
211 msg[0] = ctx->sync_msg.msg;
212 msg[1] = ctx->sync_msg.param1;
213 msg[2] = ctx->sync_msg.param2;
216 ctx->resp_pending = false;
217 mutex_unlock(&ctx->rd_mutex);
221 static int xgene_hwmon_reg_map_rd(struct xgene_hwmon_dev *ctx, u32 addr,
232 rc = xgene_hwmon_rd(ctx, msg);
234 rc = xgene_hwmon_pcc_rd(ctx, msg);
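
Lines 221-234 show the register-read wrapper picking between the native SLIMpro mailbox path (xgene_hwmon_rd) and the PCC path (xgene_hwmon_pcc_rd). A plausible shape of that dispatch is sketched below; the message packing, the result word, and the use of acpi_disabled as the test are assumptions rather than the driver's exact code:

static int reg_map_rd_sketch(struct xgene_hwmon_dev *ctx, u32 addr, u32 *data)
{
        u32 msg[3];
        int rc;

        /* Build the three-word read request; the real command/address
         * packing macros are omitted here. */
        msg[0] = addr;
        msg[1] = 0;
        msg[2] = 0;

        if (acpi_disabled)              /* DT boot: SLIMpro mailbox */
                rc = xgene_hwmon_rd(ctx, msg);
        else                            /* ACPI boot: PCC subspace */
                rc = xgene_hwmon_pcc_rd(ctx, msg);
        if (rc < 0)
                return rc;

        *data = msg[1];                 /* assumption: payload returns in param1 */
        return 0;
}
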
250 static int xgene_hwmon_get_notification_msg(struct xgene_hwmon_dev *ctx,
260 rc = xgene_hwmon_pcc_rd(ctx, msg);
271 static int xgene_hwmon_get_cpu_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
276 rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_REG, &watt);
280 rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_MW_REG, &mwatt);
288 static int xgene_hwmon_get_io_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
293 rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_REG, &watt);
297 rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_MW_REG, &mwatt);
305 static int xgene_hwmon_get_temp(struct xgene_hwmon_dev *ctx, u32 *val)
307 return xgene_hwmon_reg_map_rd(ctx, SOC_TEMP_REG, val);
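
The power helpers at lines 271-305 read a whole-watt register and a milliwatt register separately and combine the two into one milliwatt value; the temperature helper at line 307 is a single register read. A hedged sketch of the CPU-power case, assuming the two registers really are watts and milliwatts and that the driver's scaling macro amounts to a multiply by 1000:

static int get_cpu_pwr_sketch(struct xgene_hwmon_dev *ctx, u32 *val)
{
        u32 watt, mwatt;
        int rc;

        rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_REG, &watt);
        if (rc < 0)
                return rc;

        rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_MW_REG, &mwatt);
        if (rc < 0)
                return rc;

        /* Combine whole watts and the milliwatt remainder. */
        *val = watt * 1000 + mwatt;
        return 0;
}
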
317 struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
321 rc = xgene_hwmon_get_temp(ctx, &val);
341 struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
343 return sysfs_emit(buf, "%d\n", ctx->temp_critical_alarm);
364 struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
368 rc = xgene_hwmon_get_cpu_pwr(ctx, &val);
379 struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
383 rc = xgene_hwmon_get_io_pwr(ctx, &val);
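
The sysfs show callbacks at lines 317-383 all share the same shape: fetch ctx with dev_get_drvdata(), call the matching getter, and emit the value with sysfs_emit(). A sketch of the temperature case, assuming the raw reading is in whole degrees Celsius (the real driver also handles negative readings and its own scaling macros):

static ssize_t temp1_input_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
        u32 val;
        int rc;

        rc = xgene_hwmon_get_temp(ctx, &val);
        if (rc < 0)
                return rc;

        /* hwmon exposes temperatures in millidegrees Celsius. */
        return sysfs_emit(buf, "%d\n", (int)val * 1000);
}
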
411 static int xgene_hwmon_tpc_alarm(struct xgene_hwmon_dev *ctx,
414 ctx->temp_critical_alarm = !!amsg->param2;
415 sysfs_notify(&ctx->dev->kobj, NULL, "temp1_critical_alarm");
420 static void xgene_hwmon_process_pwrmsg(struct xgene_hwmon_dev *ctx,
425 xgene_hwmon_tpc_alarm(ctx, amsg);
434 struct xgene_hwmon_dev *ctx;
437 ctx = container_of(work, struct xgene_hwmon_dev, workq);
438 while (kfifo_out_spinlocked(&ctx->async_msg_fifo, &amsg,
440 &ctx->kfifo_lock)) {
446 ret = xgene_hwmon_get_notification_msg(ctx,
453 xgene_hwmon_process_pwrmsg(ctx, &amsg);
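
The worker at lines 434-453 drains the async FIFO that the rx callbacks fill and turns each queued message into an alarm update (lines 411-425 show xgene_hwmon_tpc_alarm() latching temp_critical_alarm and poking sysfs_notify()). A reduced sketch of the drain loop; the detail that only the PCC path needs a follow-up xgene_hwmon_get_notification_msg() call is an assumption:

static void evt_work_sketch(struct work_struct *work)
{
        struct xgene_hwmon_dev *ctx =
                container_of(work, struct xgene_hwmon_dev, workq);
        struct slimpro_resp_msg amsg;

        /* Pop queued async messages under the kfifo spinlock. */
        while (kfifo_out_spinlocked(&ctx->async_msg_fifo, &amsg,
                                    sizeof(amsg), &ctx->kfifo_lock)) {
                /* On the PCC path the payload still has to be fetched
                 * via xgene_hwmon_get_notification_msg() before it can
                 * be interpreted. */
                xgene_hwmon_process_pwrmsg(ctx, &amsg);
        }
}
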
457 static int xgene_hwmon_rx_ready(struct xgene_hwmon_dev *ctx, void *msg)
459 if (IS_ERR_OR_NULL(ctx->hwmon_dev) && !ctx->resp_pending) {
461 kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
463 &ctx->kfifo_lock);
475 struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
483 if (xgene_hwmon_rx_ready(ctx, msg) < 0)
496 if (ctx->resp_pending &&
503 ctx->sync_msg.msg = ((u32 *)msg)[0];
504 ctx->sync_msg.param1 = ((u32 *)msg)[1];
505 ctx->sync_msg.param2 = ((u32 *)msg)[2];
508 complete(&ctx->rd_complete);
514 kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
515 sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
517 schedule_work(&ctx->workq);
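
The rx callback at lines 475-517 splits incoming messages into two classes: the reply to a pending synchronous read is copied into ctx->sync_msg and completes ctx->rd_complete, while anything else is queued for the worker. The PCC callback at lines 525-586 does the same after pulling the words out of shared memory. A sketch of that split; the exact test that matches a reply to the outstanding read is reduced to a comment here:

static void rx_cb_sketch(struct mbox_client *cl, void *msg)
{
        struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);

        /* The mailbox can fire before probe finishes; buffer early
         * messages instead of touching half-initialised state. */
        if (xgene_hwmon_rx_ready(ctx, msg) < 0)
                return;

        if (ctx->resp_pending /* && message matches the pending read */) {
                ctx->sync_msg.msg    = ((u32 *)msg)[0];
                ctx->sync_msg.param1 = ((u32 *)msg)[1];
                ctx->sync_msg.param2 = ((u32 *)msg)[2];
                complete(&ctx->rd_complete);
                return;
        }

        /* Asynchronous notification: queue it and let the worker run. */
        kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
                            sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
        schedule_work(&ctx->workq);
}
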
525 struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
526 struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
535 if (xgene_hwmon_rx_ready(ctx, &amsg) < 0)
554 if (ctx->resp_pending &&
564 ctx->sync_msg.msg = ((u32 *)msg)[0];
565 ctx->sync_msg.param1 = ((u32 *)msg)[1];
566 ctx->sync_msg.param2 = ((u32 *)msg)[2];
569 complete(&ctx->rd_complete);
583 kfifo_in_spinlocked(&ctx->async_msg_fifo, &amsg,
584 sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
586 schedule_work(&ctx->workq);
611 struct xgene_hwmon_dev *ctx;
615 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
616 if (!ctx)
619 ctx->dev = &pdev->dev;
620 platform_set_drvdata(pdev, ctx);
621 cl = &ctx->mbox_client;
623 spin_lock_init(&ctx->kfifo_lock);
624 mutex_init(&ctx->rd_mutex);
626 rc = kfifo_alloc(&ctx->async_msg_fifo,
632 INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
642 ctx->mbox_chan = mbox_request_channel(cl, 0);
643 if (IS_ERR(ctx->mbox_chan)) {
664 &ctx->mbox_idx)) {
671 pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
679 ctx->pcc_chan = pcc_chan;
680 ctx->mbox_chan = pcc_chan->mchan;
682 if (!ctx->mbox_chan->mbox->txdone_irq) {
692 ctx->comm_base_addr = pcc_chan->shmem_base_addr;
693 if (ctx->comm_base_addr) {
695 ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
696 ctx->comm_base_addr,
699 ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
700 ctx->comm_base_addr,
709 if (!ctx->pcc_comm_addr) {
721 ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
724 ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
726 ctx,
728 if (IS_ERR(ctx->hwmon_dev)) {
730 rc = PTR_ERR(ctx->hwmon_dev);
737 schedule_work(&ctx->workq);
745 mbox_free_channel(ctx->mbox_chan);
747 pcc_mbox_free_channel(ctx->pcc_chan);
749 kfifo_free(&ctx->async_msg_fifo);
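
Within the probe fragments (lines 611-749), the ACPI/PCC branch ends by mapping the channel's shared-memory region and deriving the timeout budget used by xgene_hwmon_pcc_rd(). A sketch of just that piece, pulled out into a hypothetical helper; the choice of devm_ioremap() for both subspace types and the retry multiplier are assumptions (the real probe open-codes this and unwinds through its error labels):

static int map_pcc_shmem_sketch(struct platform_device *pdev,
                                struct xgene_hwmon_dev *ctx,
                                struct pcc_mbox_chan *pcc_chan)
{
        ctx->comm_base_addr = pcc_chan->shmem_base_addr;
        if (!ctx->comm_base_addr)
                return -ENODEV;

        ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
                                                          ctx->comm_base_addr,
                                                          pcc_chan->shmem_size);
        if (!ctx->pcc_comm_addr) {
                dev_err(&pdev->dev, "Failed to map PCC comm region\n");
                return -ENOMEM;
        }

        /* Upper bound on how long a synchronous read waits for a reply,
         * scaled from the channel's nominal command latency. */
        ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;

        return 0;
}
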
756 struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
758 cancel_work_sync(&ctx->workq);
759 hwmon_device_unregister(ctx->hwmon_dev);
760 kfifo_free(&ctx->async_msg_fifo);
762 mbox_free_channel(ctx->mbox_chan);
764 pcc_mbox_free_channel(ctx->pcc_chan);
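
The remove fragments (lines 756-764) tear down in the reverse order of probe: the worker is cancelled first so nothing touches the hwmon device or the FIFO after they are gone, and only then is the mailbox or PCC channel released. A sketch of that ordering; the acpi_disabled test used to pick which channel to free is an assumption:

static void remove_sketch(struct platform_device *pdev)
{
        struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);

        cancel_work_sync(&ctx->workq);          /* stop async processing first */
        hwmon_device_unregister(ctx->hwmon_dev);
        kfifo_free(&ctx->async_msg_fifo);
        if (acpi_disabled)
                mbox_free_channel(ctx->mbox_chan);
        else
                pcc_mbox_free_channel(ctx->pcc_chan);
}
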