// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/io.h>

#include "../dev.h"
#include "../syncpt.h"

/*
 * Write the current syncpoint value back to hw.
 */
static void syncpt_restore(struct host1x_syncpt *sp)
{
	u32 min = host1x_syncpt_read_min(sp);
	struct host1x *host = sp->host;

	host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}

/*
 * Write the current waitbase value back to hw.
 */
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
	struct host1x *host = sp->host;

	host1x_sync_writel(host, sp->base_val,
			   HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}

/*
 * Read waitbase value from hw.
 */
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
	struct host1x *host = sp->host;

	sp->base_val =
		host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}

/*
 * Updates the last value read from hardware.
 */
static u32 syncpt_load(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	u32 old, live;

	/* Loop in case there's a race writing to min_val */
	do {
		old = host1x_syncpt_read_min(sp);
		live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
	} while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);

	if (!host1x_syncpt_check_max(sp, live))
		dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
			__func__, sp->id, host1x_syncpt_read_min(sp),
			host1x_syncpt_read_max(sp));

	return live;
}

/*
 * Write a CPU syncpoint increment to the hardware, without touching
 * the cache.
 */
static int syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	u32 reg_offset = sp->id / 32;

	/*
	 * Only client-managed syncpoints may be incremented from the CPU
	 * while idle; for a host-managed syncpoint an increment without an
	 * outstanding request would push the value past its expected
	 * maximum.
	 */
	if (!host1x_syncpt_client_managed(sp) &&
	    host1x_syncpt_idle(sp))
		return -EINVAL;

	host1x_sync_writel(host, BIT(sp->id % 32),
			   HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
	/* ensure the increment is ordered before any subsequent writes */
	wmb();

	return 0;
}
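
/*
 * Note: the rest of the driver is expected to reach this op through the
 * host1x_hw_syncpt_cpu_incr() wrapper in dev.h, e.g. from
 * host1x_syncpt_incr(); a caller would look roughly like:
 *
 *	err = host1x_hw_syncpt_cpu_incr(host, sp);
 *	if (err < 0)
 *		return err;
 */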

/**
 * syncpt_assign_to_channel() - Assign syncpoint to channel
 * @sp: syncpoint
 * @ch: channel
 *
 * On chips with the syncpoint protection feature (Tegra186+), assign @sp to
 * @ch, preventing other channels from incrementing it. If @ch is NULL, the
 * syncpoint is unassigned.
 *
 * On older chips, do nothing.
 */
static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
				     struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
	struct host1x *host = sp->host;

	/* a channel ID of 0xff marks the syncpoint as unassigned */
	host1x_sync_writel(host,
			   HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
			   HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
#endif
}
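
/*
 * Note: callers are expected to go through the
 * host1x_hw_syncpt_assign_to_channel() wrapper in dev.h, assigning the
 * syncpoint before a channel starts incrementing it and passing a NULL
 * channel to release the assignment again, roughly:
 *
 *	host1x_hw_syncpt_assign_to_channel(host, sp, ch);
 *	...
 *	host1x_hw_syncpt_assign_to_channel(host, sp, NULL);
 */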

/**
 * syncpt_enable_protection() - Enable syncpoint protection
 * @host: host1x instance
 *
 * On chips with the syncpoint protection feature (Tegra186+), enable this
 * feature. On older chips, do nothing.
 */
static void syncpt_enable_protection(struct host1x *host)
{
#if HOST1X_HW >= 6
	/* the protection enable register lives in the hypervisor aperture */
	if (!host->hv_regs)
		return;

	host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN,
				 HOST1X_HV_SYNCPT_PROT_EN);
#endif
}

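/*
 * The SoC-specific init code (host1x0x.c, which is expected to #include this
 * file) installs these ops as host->syncpt_op, so the rest of the driver
 * reaches them through the host1x_hw_syncpt_*() wrappers in dev.h.
 */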
static const struct host1x_syncpt_ops host1x_syncpt_ops = {
	.restore = syncpt_restore,
	.restore_wait_base = syncpt_restore_wait_base,
	.load_wait_base = syncpt_read_wait_base,
	.load = syncpt_load,
	.cpu_incr = syncpt_cpu_incr,
	.assign_to_channel = syncpt_assign_to_channel,
	.enable_protection = syncpt_enable_protection,
};