pipe.c revision fe67f4dd
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with the GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can shrink their pipe buffers below this with F_SETPIPE_SZ at their
 * own risk: writes to a non-full pipe may then block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

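/*
 * Example: a minimal userspace sketch (hypothetical, not part of this file)
 * of the pipe-as-semaphore pattern described above, in the style of the GNU
 * make jobserver.  Each byte in the pipe is a free job slot; taking a slot
 * reads a byte and giving it back writes the byte again:
 *
 *	#include <unistd.h>
 *
 *	int jobserver[2];		// set up once with pipe(jobserver)
 *
 *	char acquire_slot(void)
 *	{
 *		char token;
 *		(void)read(jobserver[0], &token, 1);	// blocks until a slot frees up
 *		return token;
 *	}
 *
 *	void release_slot(char token)
 *	{
 *		// With fewer than PIPE_MIN_DEF_BUFFERS ring buffers, this
 *		// write can block even though the pipe is not full.
 *		(void)write(jobserver[1], &token, 1);
 *	}
 */
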
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default buffers per pipe times the default
 * open-file limit.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

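/*
 * Illustration: a short sketch (not part of this file) of the unmasked
 * head/tail scheme described above.  With a power-of-two ring size, the
 * indices wrap naturally in unsigned arithmetic and occupancy is a plain
 * subtraction, even while the counters themselves wrap past 2^32:
 *
 *	unsigned int head = 0xfffffffe;		// about to wrap around
 *	unsigned int tail = 0xfffffffc;
 *	unsigned int occupancy = head - tail;	// == 2, still correct
 *	struct pipe_buffer *buf = &pipe->bufs[tail & (pipe->ring_size - 1)];
 *
 * This is what pipe_occupancy() and the "& mask" dereferences below rely on;
 * there is no dead slot, but ring_size must be a power of two.
 */
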
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true with the page
 *	locked. The caller may then reuse the page for whatever they
 *	wish; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of
	 * this page is the only one holding a reference to it. Lock the
	 * page and return success.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

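/*
 * Example: a hypothetical userspace sketch of packet mode, which
 * is_packetized() above selects via O_DIRECT.  Each write() becomes a
 * discrete packet, and a short read() consumes the whole packet, dropping
 * any excess bytes rather than leaving them for the next read:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[5];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello world", 11);	// queued as one 11-byte packet
 *	read(fds[0], buf, 5);			// returns 5; 6 bytes are discarded
 */
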
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

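/*
 * Example: a hypothetical userspace sketch of FIONREAD as handled above; the
 * kernel walks the ring from tail to head and sums the buffer lengths:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int unread;
 *
 *	write(fds[1], "abc", 3);
 *	if (ioctl(fds[0], FIONREAD, &unread) == 0)
 *		printf("%d bytes buffered\n", unread);	// prints 3
 */
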
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics; this enables them */
	pipe->poll_usage = 1;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

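/*
 * Example: a hypothetical userspace sketch of the readiness rules computed
 * above, polling the read end of a pipe:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	// pfd.revents contains POLLIN while data is buffered, and POLLHUP
 *	// once every writer has closed its end.
 */
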
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen when enabling (on is true) */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

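/*
 * Note: the soft and hard limits consulted above are counted in pages per
 * user and are tunable by root through the fs.pipe-user-pages-soft and
 * fs.pipe-user-pages-hard sysctls; a hard limit of 0 (the default) means
 * the hard limit is not enforced.
 */
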
bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		watch_queue_clear(pipe->watch_queue);
		put_watch_queue(pipe->watch_queue);
	}
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

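/*
 * Example: a hypothetical userspace sketch of the pipe2() entry point above;
 * fildes receives the read end in fd[0] and the write end in fd[1]:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd[2];
 *	char c;
 *
 *	if (pipe2(fd, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;
 *	write(fd[1], "x", 1);
 *	read(fd[0], &c, 1);	// c == 'x'
 */
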
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

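/*
 * For instance, assuming 4 KiB pages:
 *
 *	round_pipe_size(0)	   == 4096	(clamped up to PAGE_SIZE)
 *	round_pipe_size(100000)	   == 131072	(rounded up to 2^17)
 *	round_pipe_size(1UL << 32) == 0		(rejected: larger than 2^31)
 */
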
/*
 * Resize the pipe ring to a number of slots.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	/*
	 * We can shrink the pipe as long as nr_slots is at least the ring
	 * occupancy.  Since we don't expect a lot of shrink+grow operations,
	 * just free and allocate again like we would do for growing.  If the
	 * pipe currently contains more buffers than nr_slots, return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n)
		return -EBUSY;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * new pipe size on success, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

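/*
 * Example: a hypothetical userspace sketch of resizing a pipe through the
 * fcntl() path above.  F_SETPIPE_SZ rounds the request up to a power-of-two
 * number of pages and returns the resulting capacity in bytes, which
 * F_GETPIPE_SZ reports back:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int newsize = fcntl(fd[1], F_SETPIPE_SZ, 100000);
 *	// newsize == 131072 with 4 KiB pages (see round_pipe_size() above)
 *	int cursize = fcntl(fd[1], F_GETPIPE_SZ, 0);	// also 131072
 */
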
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, and no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing in
 * procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);