// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
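
/*
 * Usage sketch (illustrative only, not part of this file): a mutex is
 * either defined statically with DEFINE_MUTEX() or embedded in an object
 * and initialized with mutex_init() before first use. The structure and
 * field names below are made up for the example.
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_device {
 *		struct mutex io_lock;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		mutex_init(&dev->io_lock);
 *	}
 */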

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
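
/*
 * Usage sketch (illustrative only, hypothetical data structure): the common
 * pattern is a sleepable critical section, released by the same task that
 * acquired the lock.
 *
 *	static LIST_HEAD(item_list);
 *	static DEFINE_MUTEX(item_lock);
 *
 *	static void add_item(struct list_head *item)
 *	{
 *		mutex_lock(&item_lock);
 *		list_add_tail(item, &item_list);
 *		mutex_unlock(&item_lock);
 *	}
 */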

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. And we already
		 * disabled preemption, which is equal to the RCU read-side
		 * critical section in optimistic spinning code. Thus the
		 * task_struct structure won't go away during the spinning
		 * period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equal to the RCU read-side
	 * critical section in optimistic spinning code. Thus the task_struct
	 * structure won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
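
/*
 * Sketch of the lifetime rule above (illustrative only, hypothetical object):
 * because mutex_unlock() may still touch the lock word while another task is
 * already acquiring it, the unlocking task must keep the containing object
 * alive across the unlock, typically via its own reference.
 *
 *	static void my_obj_stop(struct my_obj *obj)
 *	{
 *		mutex_lock(&obj->lock);
 *		obj->stopped = true;
 *		mutex_unlock(&obj->lock);
 *		my_obj_put(obj);	(drop our reference only after unlock)
 *	}
 */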

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
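
/*
 * Acquire-side sketch for wait/wound mutexes (illustrative only; simplified
 * from Documentation/locking/ww-mutex-design.rst, with made-up names): take
 * two locks of the same class, backing off and retrying on -EDEADLK. Both
 * locks are later dropped with ww_mutex_unlock() and the context is ended
 * with ww_acquire_fini().
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	static void lock_both(struct ww_mutex *a, struct ww_mutex *b,
 *			      struct ww_acquire_ctx *ctx)
 *	{
 *		struct ww_mutex *contended = NULL;
 *
 *		ww_acquire_init(ctx, &my_ww_class);
 *	retry:
 *		if (contended != a && ww_mutex_lock(a, ctx) == -EDEADLK) {
 *			if (contended == b)
 *				ww_mutex_unlock(b);
 *			contended = a;
 *			ww_mutex_lock_slow(a, ctx);
 *			goto retry;
 *		}
 *		if (contended != b && ww_mutex_lock(b, ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			contended = b;
 *			ww_mutex_lock_slow(b, ctx);
 *			goto retry;
 *		}
 *		ww_acquire_done(ctx);
 *	}
 */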

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
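
/*
 * Sketch (illustrative only, hypothetical caller): an opportunistic trylock
 * inside an existing acquire context, falling back to the blocking path on
 * contention. @ww_ctx may also be NULL for a plain trylock.
 *
 *	if (ww_mutex_trylock(&obj->resv, ctx)) {
 *		update_object(obj);
 *		ww_mutex_unlock(&obj->resv);
 *	} else {
 *		ret = ww_mutex_lock(&obj->resv, ctx);
 *		...
 *	}
 */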

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
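
/*
 * Sketch (illustrative only, hypothetical ioctl-style handler): user-triggered
 * paths usually propagate the interruption instead of sleeping uninterruptibly,
 * commonly returning -ERESTARTSYS so the syscall can be restarted.
 *
 *	static long my_dev_ioctl(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */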

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
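
/*
 * Sketch (illustrative only, hypothetical helper): paths that should not be
 * interrupted by ordinary signals, but must not block a task that is being
 * killed, use the killable variant and simply propagate the error.
 *
 *	static int my_dev_flush(struct my_dev *dev)
 *	{
 *		int ret = mutex_lock_killable(&dev->lock);
 *
 *		if (ret)
 *			return ret;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */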

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
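
/*
 * Sketch (illustrative only, hypothetical caller): note the spin_trylock()
 * style return value, 1 on success and 0 on contention, which is the opposite
 * of down_trylock().
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		do_optional_work(dev);
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		queue_work(system_wq, &dev->deferred_work);
 *	}
 */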

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: true (non-zero) and hold @lock if the decrement reached 0,
 * false (0) otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
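
/*
 * Sketch (illustrative only, hypothetical refcounted object): the usual
 * pattern drops a reference and, only when it was the last one, returns
 * holding @lock so the final teardown is serialized against lookups.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refs, &obj_list_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */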