/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <stdlib.h>
#include <termios.h>
#include <pty.h>
#include <signal.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include "init.h"
#include "user.h"
#include "kern_util.h"
#include "sigio.h"
#include "os.h"
#include "um_malloc.h"

/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;

/* These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed.  So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/* Protected by sigio_lock().  Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

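/*
 * Body of the helper thread started by write_sigio_workaround().  It polls
 * the descriptors in current_poll.  A readable sigio_private[1] means the
 * UML side is requesting a poll-set update: swap current_poll and next_poll
 * and acknowledge on the same pipe.  Any other ready descriptor is dropped
 * from the set and a byte is written to write_sigio_fds[1], whose other end
 * the UML side has registered for SIGIO (see write_sigio_irq()).
 */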
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while(1){
		n = poll(fds->poll, fds->used, -1);
		if(n < 0){
			if(errno == EINTR) continue;
			printk("write_sigio_thread : poll returned %d, "
			       "errno = %d\n", n, errno);
		}
		for(i = 0; i < fds->used; i++){
			p = &fds->poll[i];
			if(p->revents == 0) continue;
			if(p->fd == sigio_private[1]){
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if(n != sizeof(c))
					printk("write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if(n != sizeof(c))
				printk("write_sigio_thread : write on socket "
				       "failed, err = %d\n", errno);
		}
	}

	return 0;
}

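/* Grow polls->poll to hold at least n entries, preserving the current ones. */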
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if(n <= polls->size)
		return 0;

	new = um_kmalloc_atomic(n * sizeof(struct pollfd));
	if(new == NULL){
		printk("need_poll : failed to allocate new pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/* Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	n = write(sigio_private[0], &c, sizeof(c));
	if(n != sizeof(c)){
		printk("update_thread : write failed, err = %d\n", errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if(n != sizeof(c)){
		printk("update_thread : read failed, err = %d\n", errno);
		goto fail;
	}

	set_signals(flags);
	return;
 fail:
	/* Critical section start */
	if(write_sigio_pid != -1)
		os_kill_process(write_sigio_pid, 1);
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}

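/*
 * Tell the helper thread to start watching fd, which must already have been
 * recorded in all_sigio_fds by maybe_sigio_broken().  The new set is built
 * in next_poll and the thread switches over in update_thread().
 */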
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for(i = 0; i < all_sigio_fds.used; i++){
		if(all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if(i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for(i = 0; i < current_poll.used; i++){
		if(current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if(err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
 out:
	sigio_unlock();
	return err;
}

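/* Stop the helper thread from watching fd, by building a new set without it. */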
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/* This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if(write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for(i = 0; i < current_poll.used; i++){
		if(current_poll.poll[i].fd == fd) break;
	}
	if(i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if(err)
		goto out;

	for(i = 0; i < current_poll.used; i++){
		p = &current_poll.poll[i];
		if(p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
 out:
	sigio_unlock();
	return err;
}

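/* Allocate a one-entry poll set watching fd for input; used to seed
 * current_poll with the thread's end of the private pipe.
 */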
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = um_kmalloc(sizeof(struct pollfd));
	if (p == NULL) {
		printk("setup_initial_poll : failed to allocate poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd		= fd,
				.events 	= POLLIN,
				.revents 	= 0 });
	return p;
}

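/*
 * Start the SIGIO helper thread if it is not already running: create the
 * notification and control pipes, register the notification pipe's read end
 * for SIGIO on the UML side with write_sigio_irq(), and clone the thread
 * sharing this process's descriptors and address space.
 */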
static void write_sigio_workaround(void)
{
	unsigned long stack;
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/* This is called many times; usually the thread is already running,
	 * and we must simply return without doing anything. */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if(err < 0){
		printk("write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if(err < 0){
		printk("write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if(!p)
		goto out_close2;

	sigio_lock();

	/* Did we race? Don't try to optimize this; it is unlikely to happen,
	 * and at most once during boot. */
	if(write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll 	= p,
					   .used 	= 1,
					   .size 	= 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM, &stack, 0);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll	= NULL,
					   .size	= 0,
					   .used	= 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

/* Changed during early boot */
static int pty_output_sigio = 0;
static int pty_close_sigio = 0;

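/*
 * Called when fd is going to be watched for SIGIO.  If fd is a tty and the
 * host's ptys are known not to deliver SIGIO for this direction (input vs.
 * output), start the workaround thread and record the descriptor in
 * all_sigio_fds so that add_sigio_fd() can hand it to the thread later.
 */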
void maybe_sigio_broken(int fd, int read)
{
	int err;

	if(!isatty(fd))
		return;

	if((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if(err){
		printk("maybe_sigio_broken - failed to add pollfd for "
		       "descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd  	= fd,
				   .events 	= read ? POLLIN : POLLOUT,
				   .revents 	= 0 });
out:
	sigio_unlock();
}

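/* Exitcall: kill the helper thread if it was started. */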
static void sigio_cleanup(void)
{
	if(write_sigio_pid != -1){
		os_kill_process(write_sigio_pid, 1);
		write_sigio_pid = -1;
	}
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static volatile int got_sigio = 0;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

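/* Runs on the initial thread via initial_thread_cb(); opens a master/slave
 * pty pair for the boot-time SIGIO tests.
 */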
static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

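/* Put the master into non-blocking, SIGIO-generating (O_ASYNC) mode owned by
 * this process, and make the slave non-blocking.
 */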
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if(flags < 0)
		return -errno;

	if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	   (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

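/*
 * Allocate a pty, put the master into raw, asynchronous mode, install a
 * temporary SIGIO handler, and run proc(master, slave) to see whether the
 * host delivers SIGIO for that case.
 */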
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if(pty.err){
		printk("openpty failed, errno = %d\n", -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if((master == -1) || (slave == -1)){
		printk("openpty failed to allocate a pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0)
		panic("check_sigio : __raw failed, errno = %d\n", -err);

	err = async_pty(master, slave);
	if(err < 0)
		panic("check_sigio : async_pty failed, err = %d\n", -err);

	if(sigaction(SIGIO, NULL, &old) < 0)
		panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
	new = old;
	new.sa_handler = handler;
	if(sigaction(SIGIO, &new, NULL) < 0)
		panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if(sigaction(SIGIO, &old, NULL) < 0)
		panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
}

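/* Fill the master's output buffer, then drain it from the slave side, and
 * check whether a SIGIO arrives when the master becomes writable again.
 */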
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk("Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while(write(master, buf, sizeof(buf)) > 0) ;
	if(errno != EAGAIN)
		panic("tty_output : write failed, errno = %d\n", errno);
	while(((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;

	if(got_sigio){
		printk("Yes\n");
		pty_output_sigio = 1;
	}
	else if((n < 0) && (errno == EAGAIN))
		printk("No, enabling workaround\n");
	else panic("tty_output : read failed, errno = %d\n", errno);
}

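/* Check whether closing the slave side raises SIGIO on the master. */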
static void tty_close(int master, int slave)
{
	printk("Checking that host ptys support SIGIO on close...");

	close(slave);
	if(got_sigio){
		printk("Yes\n");
		pty_close_sigio = 1;
	}
	else printk("No, enabling workaround\n");
}

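/* Boot-time probe of the host's pty SIGIO behaviour; the results decide
 * whether maybe_sigio_broken() has to fall back to the polling thread.
 */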
void __init check_sigio(void)
{
	if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
	   (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
		printk("No pseudo-terminals available - skipping pty SIGIO "
		       "check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}
