/*	$NetBSD: drm_modeset_lock.c,v 1.5 2021/12/18 23:44:57 riastradh Exp $	*/

/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_modeset_lock.c,v 1.5 2021/12/18 23:44:57 riastradh Exp $");

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use &ww_mutex and acquire-contexts to avoid deadlocks.  But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx.  This is provided
 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
 *
 * The basic usage pattern is to::
 *
 *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, ctx)
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 *     out:
 *     drm_modeset_drop_locks(ctx);
 *     drm_modeset_acquire_fini(ctx);
 *
 * For convenience this control flow is implemented in
 * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
 * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
 *
 * If all that is needed is a single modeset lock, then the &struct
 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
 * by passing a NULL instead of ctx in the drm_modeset_lock() call or by
 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
 * call drm_modeset_unlock().
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
 * lists and lookup data structures.
 */
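
/*
 * A minimal illustrative sketch of the pattern above (not part of the
 * original documentation): taking two per-object locks with an explicit
 * acquire context and backing off on contention.  The function and lock
 * names (example_lock_pair, lock_a, lock_b) are hypothetical; the
 * drm_modeset_*() calls are the ones defined in this file.
 *
 *     static int example_lock_pair(struct drm_modeset_lock *lock_a,
 *                                  struct drm_modeset_lock *lock_b)
 *     {
 *         struct drm_modeset_acquire_ctx ctx;
 *         int ret;
 *
 *         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 *     retry:
 *         ret = drm_modeset_lock(lock_a, &ctx);
 *         if (!ret)
 *             ret = drm_modeset_lock(lock_b, &ctx);
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(&ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (!ret) {
 *             ... do stuff with both locks held ...
 *         }
 *         drm_modeset_drop_locks(&ctx);
 *         drm_modeset_acquire_fini(&ctx);
 *         return ret;
 *     }
 */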

static DEFINE_WW_CLASS(crtc_ww_class);

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
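
/*
 * A minimal sketch of the legacy pattern documented above (hypothetical
 * caller, not part of the original file): every drm_modeset_lock_all() call
 * must be paired with drm_modeset_unlock_all() on the same device, and the
 * calls must not nest, because the acquire context is stashed globally in
 * &drm_device.mode_config.
 *
 *     drm_modeset_lock_all(dev);
 *     ... touch modeset state of dev ...
 *     drm_modeset_unlock_all(dev);
 */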

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
		ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 *
 * If @ctx is not NULL and initialized with
 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
 * -ERESTARTSYS when interrupted.
 *
 * If @ctx is NULL then the function call behaves like a normal,
 * uninterruptible non-nesting mutex_lock() call.
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, ctx->interruptible, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
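
/*
 * A minimal sketch of the single-lock path described in the kerneldoc above
 * (hypothetical caller): with a NULL acquire context, drm_modeset_lock()
 * behaves like a plain uninterruptible mutex_lock() and must be paired with
 * drm_modeset_unlock() by hand.
 *
 *     drm_modeset_lock(&crtc->mutex, NULL);
 *     ... inspect or update state protected by crtc->mutex ...
 *     drm_modeset_unlock(&crtc->mutex);
 *
 * The interruptible variant is used the same way, but its return value must
 * be checked for -ERESTARTSYS:
 *
 *     ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
 *     if (ret)
 *         return ret;
 *     ... do stuff ...
 *     drm_modeset_unlock(&crtc->mutex);
 */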

/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
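
/*
 * A minimal sketch of the non-deprecated "lock everything" pattern
 * (hypothetical caller): the acquire context lives on the caller's stack
 * and -EDEADLK is handled with drm_modeset_backoff(), as in the DOC
 * example at the top of this file.
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *     if (ret == -EDEADLK) {
 *         ret = drm_modeset_backoff(&ctx);
 *         if (!ret)
 *             goto retry;
 *     }
 *     if (!ret) {
 *         ... do stuff with all modeset locks held ...
 *     }
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */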