/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_MEMBARRIER_H
#define _ASM_RISCV_MEMBARRIER_H

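/*
 * Hook for the membarrier(2) system call, invoked on the scheduler's
 * mm-switch path (switch_mm()); it provides the full memory barrier
 * that membarrier expects after the store to rq->curr. See the
 * comments below for the exact ordering requirements.
 */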
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
	/*
	 * Only need the full barrier when switching between processes.
	 * Barrier when switching from kernel to userspace is not
	 * required here, given that it is implied by mmdrop(). Barrier
	 * when switching from userspace to kernel is not needed after
	 * store to rq->curr.
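	 *
	 * On SMP, the full barrier below is thus only issued when
	 * prev is non-NULL and next has registered for private- or
	 * global-expedited membarrier.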
	 */
	if (IS_ENABLED(CONFIG_SMP) &&
	    likely(!(atomic_read(&next->membarrier_state) &
		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
		return;

	/*
	 * The membarrier system call requires a full memory barrier
	 * after storing to rq->curr, before going back to user-space.
	 *
	 * This barrier is also needed for the SYNC_CORE command when
	 * switching between processes; in particular, on a transition
	 * from a thread belonging to another mm to a thread belonging
	 * to the mm for which a membarrier SYNC_CORE is done on CPU0:
	 *
	 *   - [CPU0] sets all bits in the mm icache_stale_mask (in
	 *     prepare_sync_core_cmd());
	 *
	 *   - [CPU1] stores to rq->curr (by the scheduler);
	 *
	 *   - [CPU0] loads rq->curr within membarrier and observes
	 *     cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
	 *     CPU1; this means membarrier relies on switch_mm() to
	 *     issue the sync-core;
	 *
	 *   - [CPU1] switch_mm() loads icache_stale_mask; if the bit
	 *     is zero, switch_mm() may incorrectly skip the sync-core.
	 *
	 * Matches a full barrier in the proximity of the membarrier
	 * system call entry.
	 */
	smp_mb();
}
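
/*
 * For reference, a minimal sketch of the userspace sequence that the
 * SYNC_CORE scenario above serves (illustrative only, error handling
 * omitted; membarrier() stands for a raw syscall(__NR_membarrier, ...)
 * wrapper, as glibc does not provide one):
 *
 *	#include <linux/membarrier.h>
 *
 *	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
 *	...
 *	(JIT/self-modifying code writes new instructions)
 *	...
 *	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
 *
 * After the second call returns, every thread of the process is
 * guaranteed to have synchronized its instruction fetch with the
 * new code before executing further in userspace.
 */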

#endif /* _ASM_RISCV_MEMBARRIER_H */