// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

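/*
 * Lock two pipes in a consistent order (lowest address first) so that two
 * tasks taking both locks can never deadlock against each other.
 */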
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

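/*
 * Try to take sole ownership of the page backing an anonymous pipe buffer:
 * only possible if nobody else holds a reference. On success the page is
 * uncharged from kmem accounting and handed back locked.
 */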
static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

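/*
 * Copy data from the pipe ring into the caller's iov_iter, consuming buffers
 * from the tail. Sleeps (unless O_NONBLOCK) while the pipe is empty and there
 * are still writers, and wakes writers when a previously full pipe drains.
 */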
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full)) {
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

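/* O_DIRECT on a pipe selects "packet mode": each write is kept as one record. */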
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
	       !READ_ONCE(pipe->readers);
}

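/*
 * Append data from the caller's iov_iter at the head of the pipe ring,
 * merging small writes into the last buffer when possible. Sleeps (unless
 * O_NONBLOCK) while the ring is full and there are still readers.
 */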
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = true;
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !pipe_empty(head, pipe->tail)) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

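/*
 * FIONREAD reports how many bytes are currently buffered in the pipe; the
 * watch-queue ioctls configure the ring size and filter of a notification
 * pipe when CONFIG_WATCH_QUEUE is enabled.
 */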
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

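/*
 * Drop one reference to the pipe held via ->files; the pipe itself is torn
 * down once the last file referencing it goes away.
 */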
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

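/*
 * Adjust the per-user count of pipe buffer pages by (new - old) and return
 * the updated total; the helpers below compare that total against the
 * pipe-user-pages-soft and pipe-user-pages-hard sysctl limits.
 */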
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

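/*
 * Allocate a pipe with PIPE_DEF_BUFFERS slots, clamped by pipe_max_size for
 * callers without CAP_SYS_RESOURCE and dropped to PIPE_MIN_DEF_BUFFERS (or
 * refused) when an unprivileged user exceeds the soft (or hard) page limit.
 */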
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

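/*
 * Create the two struct files for a new pipe: res[0] is the read end and
 * res[1] the write end, both backed by the same pipefs inode.
 */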
int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

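/*
 * Open one end of a FIFO (or a pipefs pipe): attach to or allocate the shared
 * pipe_inode_info, bump the reader/writer counts and, for FIFOs opened in
 * blocking mode, wait for the other end to show up.
 */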
fifo_open(struct inode * inode,struct file * filp)1093*4882a593Smuzhiyun static int fifo_open(struct inode *inode, struct file *filp)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun struct pipe_inode_info *pipe;
1096*4882a593Smuzhiyun bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
1097*4882a593Smuzhiyun int ret;
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun filp->f_version = 0;
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun spin_lock(&inode->i_lock);
1102*4882a593Smuzhiyun if (inode->i_pipe) {
1103*4882a593Smuzhiyun pipe = inode->i_pipe;
1104*4882a593Smuzhiyun pipe->files++;
1105*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
1106*4882a593Smuzhiyun } else {
1107*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
1108*4882a593Smuzhiyun pipe = alloc_pipe_info();
1109*4882a593Smuzhiyun if (!pipe)
1110*4882a593Smuzhiyun return -ENOMEM;
1111*4882a593Smuzhiyun pipe->files = 1;
1112*4882a593Smuzhiyun spin_lock(&inode->i_lock);
1113*4882a593Smuzhiyun if (unlikely(inode->i_pipe)) {
1114*4882a593Smuzhiyun inode->i_pipe->files++;
1115*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
1116*4882a593Smuzhiyun free_pipe_info(pipe);
1117*4882a593Smuzhiyun pipe = inode->i_pipe;
1118*4882a593Smuzhiyun } else {
1119*4882a593Smuzhiyun inode->i_pipe = pipe;
1120*4882a593Smuzhiyun spin_unlock(&inode->i_lock);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun filp->private_data = pipe;
1124*4882a593Smuzhiyun /* OK, we have a pipe and it's pinned down */
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun __pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
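
/*
 * Illustrative userspace sketch (not kernel code) of the O_NONBLOCK
 * FIFO-open semantics implemented above. The path "/tmp/demo.fifo" is
 * a hypothetical FIFO assumed to already exist:
 *
 *	int fd = open("/tmp/demo.fifo", O_WRONLY | O_NONBLOCK);
 *	if (fd < 0 && errno == ENXIO) {
 *		// no reader yet: the FMODE_WRITE + O_NONBLOCK case
 *	}
 *	fd = open("/tmp/demo.fifo", O_RDONLY | O_NONBLOCK);
 *	// succeeds immediately even with no writer (FMODE_READ case)
 */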

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
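
/*
 * Worked examples (values illustrative, assuming PAGE_SIZE == 4096):
 *	round_pipe_size(0)	   -> 4096	(clamped up to one page)
 *	round_pipe_size(4097)	   -> 8192	(next power of two)
 *	round_pipe_size(1 << 20)   -> 1048576	(already a power of two)
 *	round_pipe_size(1UL << 32) -> 0		(too large, rejected)
 */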

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
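	/*
	 * Illustrative example (values assumed, not taken from a real
	 * run): with ring_size == 4, tail == 3 and head == 5, the two
	 * occupied slots sit at masked indices 3 and 0, so h == 1 is not
	 * greater than t == 3 and the ring has wrapped. The else-branch
	 * below copies the tsize == 1 slot at the old tail into bufs[0]
	 * and the h == 1 leading slot of the old array into bufs[1].
	 * With tail == 1 and head == 3 instead (h == 3 > t == 1, no
	 * wrap), one memcpy of n == 2 contiguous slots is enough.
	 */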
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
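
/*
 * Worked example (PAGE_SIZE == 4096 and PAGE_SHIFT == 12 assumed):
 * fcntl(fd, F_SETPIPE_SZ, 100000) reaches pipe_set_size() with
 * arg == 100000, which round_pipe_size() turns into size == 131072
 * (the next power of two), so nr_slots == 32. On success the caller
 * gets back 131072, the capacity in bytes actually granted.
 */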

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
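
/*
 * Userspace sketch of the fcntl path above (illustrative only, error
 * handling trimmed). F_SETPIPE_SZ returns the granted capacity in
 * bytes, which may exceed the requested value because of rounding:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	long granted = fcntl(fds[1], F_SETPIPE_SZ, 100000);
 *	long cur = fcntl(fds[1], F_GETPIPE_SZ);
 *	// granted == cur == 131072 on a 4 KiB page system
 */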

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);