sem.c revision 4241c1a3
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *   Thus: Perfect SMP scaling between independent semaphore arrays.
 *         If multiple semaphores in one array are used, then cache line
 *         thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving
 *   FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
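
/*
 * Illustrative userspace sketch (not kernel code; error handling omitted):
 * a single-semaphore set used as a mutex. SEM_UNDO asks the kernel to
 * revert the adjustment automatically if the process exits while holding
 * the "lock" (see exit_sem()), subject to the 0..SEMVMX clamping noted
 * above.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	union semun { int val; };	// caller-defined, per SUS
 *
 *	int semid = semget(IPC_PRIVATE, 1, 0600);
 *	union semun arg = { .val = 1 };
 *	semctl(semid, 0, SETVAL, arg);			// mutex starts free
 *
 *	struct sembuf lock   = { 0, -1, SEM_UNDO };	// P(): may block
 *	struct sembuf unlock = { 0, +1, SEM_UNDO };	// V(): never blocks
 *	semop(semid, &lock, 1);
 *	// ... critical section ...
 *	semop(semid, &unlock, 1);
 */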

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations */
					/* that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations */
					/* that do not alter the semaphore */
	time64_t	 sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
	time64_t		sem_ctime;	/* create/last semctl() time */
	struct list_head	pending_alter;	/* pending operations */
						/* that alter the array */
	struct list_head	pending_const;	/* pending complex operations */
						/* that do not alter semvals */
	struct list_head	list_id;	/* undo requests on this array */
	int			sem_nsems;	/* no. of semaphores in array */
	int			complex_count;	/* pending complex operations */
	unsigned int		use_global_lock;/* >0: global lock required */

	struct sem		sems[];
} __randomize_layout;

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	struct pid		*pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * Switching from the mode suitable for simple ops
 * to the mode for complex ops is costly. Therefore:
 * use some hysteresis
 */
#define USE_GLOBAL_LOCK_HYSTERESIS	10

/*
 * Locking:
 * a) global sem_lock() for read/write
 *	sem_undo.id_next,
 *	sem_array.complex_count,
 *	sem_array.pending{_alter,_const},
 *	sem_array.sem_undo
 *
 * b) global or semaphore sem_lock() for read/write:
 *	sem_array.sems[i].pending_{const,alter}:
 *
 * c) special:
 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	use_global_lock:
 *	* global sem_lock() for write
 *	* either local or global sem_lock() for read.
 *
 *
 * Memory ordering:
 * Most ordering is enforced by using spin_lock() and spin_unlock().
 * The special case is use_global_lock:
 * Setting it from non-zero to 0 is a RELEASE, this is ensured by
 * using smp_store_release().
 * Testing if it is non-zero is an ACQUIRE, this is ensured by using
 * smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regards to
 * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
 */
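
/*
 * Illustrative userspace analogue of the use_global_lock handoff above
 * (a sketch using C11 atomics, not the kernel implementation): the
 * writer publishes with a store-release of 0, the reader observes with
 * a load-acquire - the same pairing as complexmode_tryleave() and the
 * sem_lock() fast path below.
 *
 *	#include <assert.h>
 *	#include <stdatomic.h>
 *
 *	static _Atomic unsigned int use_global_lock = 1;
 *	static int protected_data;
 *
 *	void writer(void)	// cf. complexmode_tryleave()
 *	{
 *		protected_data = 42;
 *		atomic_store_explicit(&use_global_lock, 0,
 *				      memory_order_release);
 *	}
 *
 *	void reader(void)	// cf. the sem_lock() fast path
 *	{
 *		if (!atomic_load_explicit(&use_global_lock,
 *					  memory_order_acquire))
 *			assert(protected_data == 42);	// guaranteed visible
 *	}
 */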

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

int sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	return ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif

int __init sem_init(void)
{
	const int err = sem_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
	return err;
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
	int i;
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
}

/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0)  {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;

	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}

/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count)  {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		sma->use_global_lock--;
	}
}

#define SEM_GLOBAL_LOCK	(-1)
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!sma->use_global_lock) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;
	size_t size;

	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
	sma = kvmalloc(size, GFP_KERNEL);
	if (unlikely(!sma))
		return NULL;

	memset(sma, 0, size);

	return sma;
}

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}


/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

long ksys_semget(key_t key, int nsems, int semflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops sem_ops = {
		.getnew = newary,
		.associate = security_sem_associate,
		.more_checks = sem_more_checks,
	};
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
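
/*
 * Illustrative userspace sketch (not kernel code): semget() on an existing
 * key succeeds as long as the requested nsems does not exceed the size of
 * the existing set (sem_more_checks() above); nsems == 0 is accepted for
 * an existing set, while creating with nsems == 0 fails in newary().
 *
 *	int id1 = semget(0x1234, 4, IPC_CREAT | 0600);	// creates 4 sems
 *	int id2 = semget(0x1234, 2, 0600);		// ok: 2 <= 4
 *	int id3 = semget(0x1234, 8, 0600);		// fails with EINVAL
 */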

/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Caller blocking behavior is as follows, based on the value of the
 * semaphore operation (sem_op):
 *
 *  (1) >0 never blocks.
 *  (2)  0 (wait-for-zero operation): blocks if semval is non-zero.
 *  (3) <0 blocks if the operation would decrement semval below zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}

static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
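
/*
 * Illustrative userspace sketch (not kernel code): one semop() call with
 * two operations. perform_atomic_semop() applies both atomically or
 * neither; IPC_NOWAIT on the operation that would block turns sleeping
 * into an EAGAIN failure.
 *
 *	struct sembuf ops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT },
 *		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
 *	};
 *	if (semop(semid, ops, 2) == -1 && errno == EAGAIN)
 *		;	// sem 0 was zero: neither operation was applied
 */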

static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	wake_q_add(wake_q, q->sleeper);
	/*
	 * Rely on the above implicit barrier, such that we can
	 * ensure that we hold reference to the task before setting
	 * q->status. Otherwise we could race with do_exit if the
	 * task is awoken by an external event before calling
	 * wake_up_process().
	 */
	WRITE_ONCE(q->status, error);
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	/* pending complex alter operations are too difficult to analyse */
	if (!list_empty(&sma->pending_alter))
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	/* It is impossible that someone waits for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array. It must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, then they have observed the original
	 *   semval value and couldn't proceed. The operation
	 *   decremented the value even further - thus they won't
	 *   proceed either.
	 */
	return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}

/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means the modified semaphores are not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}


/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per-semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
						ktime_get_real_seconds();
	}
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrements.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops won't be able to run: If the
			 *   previous value was too small, then the new
			 *   value will be too small, too.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
			bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior.
	 * Give the administrators a chance to notice that an application
	 * might misbehave because it relies on the Linux behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
static int count_semcnt(struct sem_array *sma, ushort semnum,
			bool count_zero)
{
	struct list_head *l;
	struct sem_queue *q;
	int semcnt;

	semcnt = 0;
	/* First: check the simple operations. They are easy to evaluate */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	list_for_each_entry(q, l, list) {
		/* all tasks on a per-semaphore list sleep on exactly
		 * that semaphore
		 */
		semcnt++;
	}

	/* Then: check the complex operations. */
	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	if (count_zero) {
		list_for_each_entry(q, &sma->pending_const, list) {
			semcnt += check_qop(sma, semnum, q, count_zero);
		}
	}
	return semcnt;
}
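
/*
 * Illustrative userspace sketch (not kernel code): the counts computed by
 * count_semcnt() above are reported via semctl(). GETNCNT counts tasks
 * blocked on decrementing semaphore 0, GETZCNT counts tasks waiting for
 * it to become zero.
 *
 *	int nwaiters = semctl(semid, 0, GETNCNT);
 *	int zwaiters = semctl(semid, 0, GETZCNT);
 */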

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static time64_t get_semotime(struct sem_array *sma)
{
	int i;
	time64_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}

static int semctl_stat(struct ipc_namespace *ns, int semid,
			 int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}

static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_id;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_id = ipc_get_maxid(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_id < 0) ? 0 : max_id;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				      &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}

long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg);
}
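
/*
 * Illustrative userspace sketch (not kernel code): IPC_STAT through the
 * dispatch above. The kernel fills a struct semid_ds via semctl_stat()
 * and copy_semid_to_user().
 *
 *	union semun { int val; struct semid_ds *buf; };
 *
 *	struct semid_ds ds;
 *	union semun arg = { .buf = &ds };
 *	if (semctl(semid, 0, IPC_STAT, arg) == 0)
 *		printf("nsems=%lu otime=%ld\n",
 *		       (unsigned long)ds.sem_nsems, (long)ds.sem_otime);
 */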

#ifdef CONFIG_COMPAT

struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	compat_time_t sem_otime;
	compat_time_t sem_ctime;
	compat_uptr_t sem_base;
	compat_uptr_t sem_pending;
	compat_uptr_t sem_pending_last;
	compat_uptr_t undo;
	unsigned short sem_nsems;
};

static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_semid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	} else {
		struct compat_semid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
	}
}

static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_semid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime	 = lower_32_bits(in->sem_otime);
		v.sem_otime_high = upper_32_bits(in->sem_otime);
		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_semid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime = in->sem_otime;
		v.sem_ctime = in->sem_ctime;
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int version = compat_ipc_parse_version(&cmd);
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fallthru */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}

COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg);
}
#endif

/* If the task doesn't already have an undo_list, then allocate one
 * here.  We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		refcount_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}
1855
1856/**
1857 * find_alloc_undo - lookup (and if not present create) undo array
1858 * @ns: namespace
1859 * @semid: semaphore array id
1860 *
1861 * The function looks up (and if not present creates) the undo structure.
1862 * The size of the undo structure depends on the size of the semaphore
1863 * array, thus the alloc path is not that straightforward.
1864 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1865 * performs a rcu_read_lock().
1866 */
1867static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1868{
1869	struct sem_array *sma;
1870	struct sem_undo_list *ulp;
1871	struct sem_undo *un, *new;
1872	int nsems, error;
1873
1874	error = get_undo_list(&ulp);
1875	if (error)
1876		return ERR_PTR(error);
1877
1878	rcu_read_lock();
1879	spin_lock(&ulp->lock);
1880	un = lookup_undo(ulp, semid);
1881	spin_unlock(&ulp->lock);
1882	if (likely(un != NULL))
1883		goto out;
1884
1885	/* no undo structure around - allocate one. */
1886	/* step 1: figure out the size of the semaphore array */
1887	sma = sem_obtain_object_check(ns, semid);
1888	if (IS_ERR(sma)) {
1889		rcu_read_unlock();
1890		return ERR_CAST(sma);
1891	}
1892
1893	nsems = sma->sem_nsems;
1894	if (!ipc_rcu_getref(&sma->sem_perm)) {
1895		rcu_read_unlock();
1896		un = ERR_PTR(-EIDRM);
1897		goto out;
1898	}
1899	rcu_read_unlock();
1900
1901	/* step 2: allocate new undo structure */
1902	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1903	if (!new) {
1904		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1905		return ERR_PTR(-ENOMEM);
1906	}
1907
1908	/* step 3: Acquire the lock on semaphore array */
1909	rcu_read_lock();
1910	sem_lock_and_putref(sma);
1911	if (!ipc_valid_object(&sma->sem_perm)) {
1912		sem_unlock(sma, -1);
1913		rcu_read_unlock();
1914		kfree(new);
1915		un = ERR_PTR(-EIDRM);
1916		goto out;
1917	}
1918	spin_lock(&ulp->lock);
1919
1920	/*
1921	 * step 4: check for races: did someone else allocate the undo struct?
1922	 */
1923	un = lookup_undo(ulp, semid);
1924	if (un) {
1925		kfree(new);
1926		goto success;
1927	}
1928	/* step 5: initialize & link new undo structure */
1929	new->semadj = (short *) &new[1];
1930	new->ulp = ulp;
1931	new->semid = semid;
1932	assert_spin_locked(&ulp->lock);
1933	list_add_rcu(&new->list_proc, &ulp->list_proc);
1934	ipc_assert_locked_object(&sma->sem_perm);
1935	list_add(&new->list_id, &sma->list_id);
1936	un = new;
1937
1938success:
1939	spin_unlock(&ulp->lock);
1940	sem_unlock(sma, -1);
1941out:
1942	return un;
1943}
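
/*
 * The allocation dance in find_alloc_undo() is the classic "unlock,
 * allocate, relock, recheck" pattern. Schematically (illustrative
 * pseudo-C only, not a real kernel interface):
 *
 *	un = lookup(key);		// fast path, under the lock
 *	if (un)
 *		return un;
 *	unlock();			// GFP_KERNEL may sleep, drop locks
 *	new = alloc();
 *	lock();
 *	un = lookup(key);		// recheck: did someone race us?
 *	if (un) {
 *		free(new);		// lose the race gracefully
 *		return un;
 *	}
 *	link(new);
 *	return new;
 */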
1944
1945static long do_semtimedop(int semid, struct sembuf __user *tsops,
1946		unsigned nsops, const struct timespec64 *timeout)
1947{
1948	int error = -EINVAL;
1949	struct sem_array *sma;
1950	struct sembuf fast_sops[SEMOPM_FAST];
1951	struct sembuf *sops = fast_sops, *sop;
1952	struct sem_undo *un;
1953	int max, locknum;
1954	bool undos = false, alter = false, dupsop = false;
1955	struct sem_queue queue;
1956	unsigned long dup = 0, jiffies_left = 0;
1957	struct ipc_namespace *ns;
1958
1959	ns = current->nsproxy->ipc_ns;
1960
1961	if (nsops < 1 || semid < 0)
1962		return -EINVAL;
1963	if (nsops > ns->sc_semopm)
1964		return -E2BIG;
1965	if (nsops > SEMOPM_FAST) {
1966		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
1967		if (sops == NULL)
1968			return -ENOMEM;
1969	}
1970
1971	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1972		error = -EFAULT;
1973		goto out_free;
1974	}
1975
1976	if (timeout) {
1977		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
1978			timeout->tv_nsec >= 1000000000L) {
1979			error = -EINVAL;
1980			goto out_free;
1981		}
1982		jiffies_left = timespec64_to_jiffies(timeout);
1983	}
1984
1985	max = 0;
1986	for (sop = sops; sop < sops + nsops; sop++) {
1987		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
1988
1989		if (sop->sem_num >= max)
1990			max = sop->sem_num;
1991		if (sop->sem_flg & SEM_UNDO)
1992			undos = true;
1993		if (dup & mask) {
1994			/*
1995			 * There was a previous alter access that appears
1996			 * to have accessed the same semaphore, thus use
1997			 * the dupsop logic. "appears" because the detection
1998			 * only checks sem_num % BITS_PER_LONG.
1999			 */
2000			dupsop = true;
2001		}
2002		if (sop->sem_op != 0) {
2003			alter = true;
2004			dup |= mask;
2005		}
2006	}
2007
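	/*
	 * Example of the dup detection above: with BITS_PER_LONG == 64,
	 * sem_num 3 and sem_num 67 both map to bit 3 of 'dup', so an
	 * alter operation on semaphore 3 followed by any operation on
	 * semaphore 67 is flagged as dupsop although the semaphores
	 * differ. The false positive is harmless: it merely selects the
	 * more careful duplicate-aware path in perform_atomic_semop().
	 */
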
2008	if (undos) {
2009		/* On success, find_alloc_undo takes the rcu_read_lock */
2010		un = find_alloc_undo(ns, semid);
2011		if (IS_ERR(un)) {
2012			error = PTR_ERR(un);
2013			goto out_free;
2014		}
2015	} else {
2016		un = NULL;
2017		rcu_read_lock();
2018	}
2019
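	/* either way, the rcu read lock is held for the id lookup below */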
2020	sma = sem_obtain_object_check(ns, semid);
2021	if (IS_ERR(sma)) {
2022		rcu_read_unlock();
2023		error = PTR_ERR(sma);
2024		goto out_free;
2025	}
2026
2027	error = -EFBIG;
2028	if (max >= sma->sem_nsems) {
2029		rcu_read_unlock();
2030		goto out_free;
2031	}
2032
2033	error = -EACCES;
2034	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2035		rcu_read_unlock();
2036		goto out_free;
2037	}
2038
2039	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2040	if (error) {
2041		rcu_read_unlock();
2042		goto out_free;
2043	}
2044
2045	error = -EIDRM;
2046	locknum = sem_lock(sma, sops, nsops);
2047	/*
2048	 * We eventually might perform the following check in a lockless
2049	 * fashion, considering ipc_valid_object() locking constraints.
2050	 * If nsops == 1 and there is no contention for sem_perm.lock, then
2051	 * only a per-semaphore lock is held and it's OK to proceed with the
2052	 * check below. More details on the fine grained locking scheme
2053	 * entangled here and why it's RMID race safe on comments at sem_lock()
2054	 */
2055	if (!ipc_valid_object(&sma->sem_perm))
2056		goto out_unlock_free;
2057	/*
2058	 * semid identifiers are not unique - find_alloc_undo may have
2059	 * allocated an undo structure, it was invalidated by an RMID,
2060	 * and now a new array has received the same id. Check and fail.
2061	 * This case can be detected by checking un->semid. The existence of
2062	 * "un" itself is guaranteed by rcu.
2063	 */
2064	if (un && un->semid == -1)
2065		goto out_unlock_free;
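	/*
	 * (freeary(), called from semctl(IPC_RMID), marks each attached
	 * undo structure by setting un->semid to -1; that is the marker
	 * the check above catches.)
	 */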
2066
2067	queue.sops = sops;
2068	queue.nsops = nsops;
2069	queue.undo = un;
2070	queue.pid = task_tgid(current);
2071	queue.alter = alter;
2072	queue.dupsop = dupsop;
2073
2074	error = perform_atomic_semop(sma, &queue);
2075	if (error == 0) { /* non-blocking successful path */
2076		DEFINE_WAKE_Q(wake_q);
2077
2078		/*
2079		 * If the operation was successful, then do
2080		 * the required updates.
2081		 */
2082		if (alter)
2083			do_smart_update(sma, sops, nsops, 1, &wake_q);
2084		else
2085			set_semotime(sma, sops);
2086
2087		sem_unlock(sma, locknum);
2088		rcu_read_unlock();
2089		wake_up_q(&wake_q);
2090
2091		goto out_free;
2092	}
2093	if (error < 0) /* non-blocking error path */
2094		goto out_unlock_free;
2095
2096	/*
2097	 * We need to sleep on this operation, so we put the current
2098	 * task into the pending queue and go to sleep.
2099	 */
2100	if (nsops == 1) {
2101		struct sem *curr;
2102		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2103		curr = &sma->sems[idx];
2104
2105		if (alter) {
2106			if (sma->complex_count) {
2107				list_add_tail(&queue.list,
2108						&sma->pending_alter);
2109			} else {
2111				list_add_tail(&queue.list,
2112						&curr->pending_alter);
2113			}
2114		} else {
2115			list_add_tail(&queue.list, &curr->pending_const);
2116		}
2117	} else {
2118		if (!sma->complex_count)
2119			merge_queues(sma);
2120
2121		if (alter)
2122			list_add_tail(&queue.list, &sma->pending_alter);
2123		else
2124			list_add_tail(&queue.list, &sma->pending_const);
2125
2126		sma->complex_count++;
2127	}
2128
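	/*
	 * Sleep until the operation is completed by another task's
	 * do_smart_update(), which writes queue.status and wakes us via
	 * a wake_q, or until a timeout or signal. Since the wakeup
	 * happens after all locks were dropped, a -EINTR status seen
	 * here may also be a spurious wakeup, hence the loop.
	 */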
2129	do {
2130		WRITE_ONCE(queue.status, -EINTR);
2131		queue.sleeper = current;
2132
2133		__set_current_state(TASK_INTERRUPTIBLE);
2134		sem_unlock(sma, locknum);
2135		rcu_read_unlock();
2136
2137		if (timeout)
2138			jiffies_left = schedule_timeout(jiffies_left);
2139		else
2140			schedule();
2141
2142		/*
2143		 * fastpath: the semop has completed; whether it did so
2144		 * successfully or not is, from the syscall's pov, irrelevant
2145		 * to us at this point: we're done.
2146		 *
2147		 * We _do_ care, nonetheless, about being awoken by a signal or
2148		 * spuriously.  The queue.status is checked again in the
2149		 * slowpath (aka after taking sem_lock), such that we can detect
2150		 * scenarios where we were awakened externally, during the
2151		 * window between wake_q_add() and wake_up_q().
2152		 */
2153		error = READ_ONCE(queue.status);
2154		if (error != -EINTR) {
2155			/*
2156			 * User space could assume that semop() is a memory
2157			 * barrier: without the smp_mb(), the CPU could
2158			 * speculatively read stale user-space data that was
2159			 * overwritten by the previous owner of the semaphore.
2160			 */
2161			smp_mb();
2162			goto out_free;
2163		}
2164
2165		rcu_read_lock();
2166		locknum = sem_lock(sma, sops, nsops);
2167
2168		if (!ipc_valid_object(&sma->sem_perm))
2169			goto out_unlock_free;
2170
2171		error = READ_ONCE(queue.status);
2172
2173		/*
2174		 * If queue.status != -EINTR we are woken up by another process.
2175		 * Leave without unlink_queue(), but with sem_unlock().
2176		 */
2177		if (error != -EINTR)
2178			goto out_unlock_free;
2179
2180		/*
2181		 * If an interrupt occurred we have to clean up the queue.
2182		 */
2183		if (timeout && jiffies_left == 0)
2184			error = -EAGAIN;
2185	} while (error == -EINTR && !signal_pending(current)); /* spurious */
2186
2187	unlink_queue(sma, &queue);
2188
2189out_unlock_free:
2190	sem_unlock(sma, locknum);
2191	rcu_read_unlock();
2192out_free:
2193	if (sops != fast_sops)
2194		kvfree(sops);
2195	return error;
2196}
2197
2198long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2199		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
2200{
2201	if (timeout) {
2202		struct timespec64 ts;
2203		if (get_timespec64(&ts, timeout))
2204			return -EFAULT;
2205		return do_semtimedop(semid, tsops, nsops, &ts);
2206	}
2207	return do_semtimedop(semid, tsops, nsops, NULL);
2208}
2209
2210SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
2211		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
2212{
2213	return ksys_semtimedop(semid, tsops, nsops, timeout);
2214}
2215
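/*
 * Userspace sketch of the syscall above (illustrative only: error
 * handling is trimmed, 'semid' is assumed to name an existing set and
 * handle_timeout() is a placeholder):
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	if (semtimedop(semid, &sop, 1, &ts) == -1 && errno == EAGAIN)
 *		handle_timeout();	// gave up after two seconds
 */
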
2216#ifdef CONFIG_COMPAT_32BIT_TIME
2217long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2218			    unsigned int nsops,
2219			    const struct compat_timespec __user *timeout)
2220{
2221	if (timeout) {
2222		struct timespec64 ts;
2223		if (compat_get_timespec64(&ts, timeout))
2224			return -EFAULT;
2225		return do_semtimedop(semid, tsems, nsops, &ts);
2226	}
2227	return do_semtimedop(semid, tsems, nsops, NULL);
2228}
2229
2230COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
2231		       unsigned int, nsops,
2232		       const struct compat_timespec __user *, timeout)
2233{
2234	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
2235}
2236#endif
2237
2238SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2239		unsigned, nsops)
2240{
2241	return do_semtimedop(semid, tsops, nsops, NULL);
2242}
2243
2244/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2245 * parent and child tasks.
2246 */
2247
2248int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2249{
2250	struct sem_undo_list *undo_list;
2251	int error;
2252
2253	if (clone_flags & CLONE_SYSVSEM) {
2254		error = get_undo_list(&undo_list);
2255		if (error)
2256			return error;
2257		refcount_inc(&undo_list->refcnt);
2258		tsk->sysvsem.undo_list = undo_list;
2259	} else
2260		tsk->sysvsem.undo_list = NULL;
2261
2262	return 0;
2263}
2264
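/*
 * Illustrative consequence of the above (userspace view, glibc clone()
 * wrapper assumed): after
 *
 *	clone(child_fn, stack_top, CLONE_SYSVSEM | SIGCHLD, NULL);
 *
 * parent and child share one undo_list, so SEM_UNDO adjustments made
 * by either task are applied once, when the last sharer exits. After a
 * plain fork() the child starts with no undo_list and gets its own on
 * first use.
 */
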
2265/*
2266 * add semadj values to semaphores, free undo structures.
2267 * undo structures are not freed when semaphore arrays are destroyed
2268 * so some of them may be out of date.
2269 * IMPLEMENTATION NOTE: There is some confusion over whether the
2270 * set of adjustments should be applied atomically or not. That is, if
2271 * we are attempting to decrement the semval, should we queue up and
2272 * wait until we can do so legally?
2273 * The original implementation attempted to do this (queue and wait).
2274 * The current implementation does not do so. The POSIX standard
2275 * and SVID should be consulted to determine what behavior is mandated.
2276 */
2277void exit_sem(struct task_struct *tsk)
2278{
2279	struct sem_undo_list *ulp;
2280
2281	ulp = tsk->sysvsem.undo_list;
2282	if (!ulp)
2283		return;
2284	tsk->sysvsem.undo_list = NULL;
2285
2286	if (!refcount_dec_and_test(&ulp->refcnt))
2287		return;
2288
2289	for (;;) {
2290		struct sem_array *sma;
2291		struct sem_undo *un;
2292		int semid, i;
2293		DEFINE_WAKE_Q(wake_q);
2294
2295		cond_resched();
2296
2297		rcu_read_lock();
2298		un = list_entry_rcu(ulp->list_proc.next,
2299				    struct sem_undo, list_proc);
2300		if (&un->list_proc == &ulp->list_proc) {
2301			/*
2302			 * We must wait for freeary() before freeing this ulp,
2303			 * in case we raced with the last sem_undo. There is a
2304			 * small window where we exit while freeary() hasn't
2305			 * finished unlocking the sem_undo_list.
2306			 */
2307			spin_lock(&ulp->lock);
2308			spin_unlock(&ulp->lock);
2309			rcu_read_unlock();
2310			break;
2311		}
2312		spin_lock(&ulp->lock);
2313		semid = un->semid;
2314		spin_unlock(&ulp->lock);
2315
2316		/* exit_sem raced with IPC_RMID, nothing to do */
2317		if (semid == -1) {
2318			rcu_read_unlock();
2319			continue;
2320		}
2321
2322		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2323		/* exit_sem raced with IPC_RMID, nothing to do */
2324		if (IS_ERR(sma)) {
2325			rcu_read_unlock();
2326			continue;
2327		}
2328
2329		sem_lock(sma, NULL, -1);
2330		/* exit_sem raced with IPC_RMID, nothing to do */
2331		if (!ipc_valid_object(&sma->sem_perm)) {
2332			sem_unlock(sma, -1);
2333			rcu_read_unlock();
2334			continue;
2335		}
2336		un = __lookup_undo(ulp, semid);
2337		if (un == NULL) {
2338			/* exit_sem raced with IPC_RMID+semget() that created
2339			 * exactly the same semid. Nothing to do.
2340			 */
2341			sem_unlock(sma, -1);
2342			rcu_read_unlock();
2343			continue;
2344		}
2345
2346		/* remove un from the linked lists */
2347		ipc_assert_locked_object(&sma->sem_perm);
2348		list_del(&un->list_id);
2349
2350		/* we are the last process using this ulp, acquiring ulp->lock
2351		 * isn't required. Besides that, we are also protected against
2352		 * IPC_RMID as we hold sma->sem_perm lock now
2353		 */
2354		list_del_rcu(&un->list_proc);
2355
2356		/* perform adjustments registered in un */
2357		for (i = 0; i < sma->sem_nsems; i++) {
2358			struct sem *semaphore = &sma->sems[i];
2359			if (un->semadj[i]) {
2360				semaphore->semval += un->semadj[i];
2361				/*
2362				 * Range checks of the new semaphore value,
2363				 * not defined by SUS (the Single UNIX Specification):
2364				 * - Some unices ignore the undo entirely
2365				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
2366				 * - some cap the value (e.g. FreeBSD caps
2367				 *   at 0, but doesn't enforce SEMVMX)
2368				 *
2369				 * Linux caps the semaphore value, both at 0
2370				 * and at SEMVMX.
2371				 *
2372				 *	Manfred <manfred@colorfullife.com>
2373				 */
2374				if (semaphore->semval < 0)
2375					semaphore->semval = 0;
2376				if (semaphore->semval > SEMVMX)
2377					semaphore->semval = SEMVMX;
2378				ipc_update_pid(&semaphore->sempid, task_tgid(current));
2379			}
2380		}
2381		/* maybe some queued-up processes were waiting for this */
2382		do_smart_update(sma, NULL, 0, 1, &wake_q);
2383		sem_unlock(sma, -1);
2384		rcu_read_unlock();
2385		wake_up_q(&wake_q);
2386
2387		kfree_rcu(un, rcu);
2388	}
2389	kfree(ulp);
2390}
2391
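/*
 * Worked example of the adjustment loop above: a task performs semop()
 * with sem_op = -2 and SEM_UNDO on a semaphore whose value is 5, so
 * semval becomes 3 and semadj = +2 is recorded. If the task exits
 * without reversing the operation, exit_sem() adds the +2 back and
 * semval returns to 5, clamped to 0..SEMVMX as described above.
 */
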
2392#ifdef CONFIG_PROC_FS
2393static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2394{
2395	struct user_namespace *user_ns = seq_user_ns(s);
2396	struct kern_ipc_perm *ipcp = it;
2397	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2398	time64_t sem_otime;
2399
2400	/*
2401	 * The proc interface isn't aware of sem_lock(); it calls
2402	 * ipc_lock_object() directly (in sysvipc_find_ipc).
2403	 * In order to stay compatible with sem_lock(), we must
2404	 * enter / leave complex_mode.
2405	 */
2406	complexmode_enter(sma);
2407
2408	sem_otime = get_semotime(sma);
2409
2410	seq_printf(s,
2411		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
2412		   sma->sem_perm.key,
2413		   sma->sem_perm.id,
2414		   sma->sem_perm.mode,
2415		   sma->sem_nsems,
2416		   from_kuid_munged(user_ns, sma->sem_perm.uid),
2417		   from_kgid_munged(user_ns, sma->sem_perm.gid),
2418		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
2419		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
2420		   sem_otime,
2421		   sma->sem_ctime);
2422
2423	complexmode_tryleave(sma);
2424
2425	return 0;
2426}
2427#endif
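
/*
 * For reference, a /proc/sysvipc/sem line as produced by the format
 * string above (values and column alignment illustrative):
 *
 *	       key      semid perms nsems   uid   gid  cuid  cgid      otime      ctime
 *	1297372587      32769   666     1  1000  1000  1000  1000 1577836800 1577836800
 */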
2428