/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

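/*
 * Set or clear the close-on-exec bit for @fd in the current process's
 * descriptor table.  The caller must pass a valid, open descriptor;
 * the bit is updated under files->file_lock so it cannot race with a
 * concurrent resize of the fd table.
 */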
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

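/*
 * dup3() duplicates @oldfd onto @newfd, atomically closing @newfd
 * first if it was open.  Unlike dup2(), oldfd == newfd is rejected
 * with -EINVAL, and the only flag accepted is O_CLOEXEC, which marks
 * the new descriptor close-on-exec.  -EBUSY is returned if @newfd is
 * allocated but not yet installed (see the comment in the body).
 */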
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file, *tofree;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * still unfinished descriptor.  NB: OpenBSD avoids that at the price
	 * of extra work in their equivalent of fget() - they insert the
	 * struct file immediately after grabbing the descriptor, mark it
	 * larval if more work (e.g. the actual opening) is needed, and make
	 * sure that fget() treats larval files as absent.  Potentially
	 * interesting, but while extra work in fget() is trivial, the
	 * locking implications and amount of surgery on open()-related
	 * paths in VFS are not.  FreeBSD fails with -EBADF in the same
	 * situation, NetBSD's "solution" deadlocks in rather amusing ways,
	 * AFAICS.  All of that is out of scope of POSIX or SUS, since
	 * neither considers shared descriptor tables and this condition
	 * does not arise without those.
	 */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_unlock;
	get_file(file);
	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(newfd, fdt->close_on_exec);
	else
		FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return newfd;

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

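/*
 * dup2() is dup3() without flags, plus the POSIX corner case: when
 * oldfd == newfd the call degenerates to a validity check on oldfd,
 * returning it unchanged if open and -EBADF otherwise.
 */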
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

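/*
 * dup() copies @fildes to the lowest-numbered free descriptor.  The
 * struct file reference taken by fget() is handed over to the new
 * descriptor by fd_install(), or dropped again if no fd is available.
 */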
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

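/*
 * The file status flags that F_SETFL may change.  setfl() below copies
 * only these bits from its argument; FASYNC is toggled separately
 * through the file's ->fasync() method.
 */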
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!is_owner_or_cap(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
			filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

 out:
	return error;
}

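/*
 * Install @pid as the owner that will receive SIGIO/SIGURG for this
 * file.  An existing owner is only replaced when @force is set.  The
 * caller's uid and euid are recorded so sigio_perm() can later decide
 * whether a signal may actually be delivered.
 */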
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);

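/*
 * f_delown() clears the file owner; f_getown() reports it in the
 * classic F_GETOWN encoding, where a process group is returned as a
 * negated pid.
 */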
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

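/*
 * F_SETOWN_EX/F_GETOWN_EX below operate on a struct f_owner_ex in user
 * memory, which disambiguates thread (F_OWNER_TID), process
 * (F_OWNER_PID) and process-group (F_OWNER_PGRP) owners.  A thread
 * owner is stored internally as PIDTYPE_MAX.
 */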
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		ret = __f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

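/*
 * Dispatch a single fcntl command.  "Get" style commands return their
 * result as the (usually positive) return value; F_GETOWN may legally
 * look negative for a process group, which is why it is paired with
 * force_successful_syscall_return().
 */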
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		if (arg >= rlimit(RLIMIT_NOFILE))
			break;
		err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
		if (err >= 0) {
			get_file(filp);
			fd_install(err, filp);
		}
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

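/*
 * A minimal userspace sketch of the common F_GETFL/F_SETFL pattern
 * this syscall serves (illustrative only; error handling trimmed):
 *
 *	int set_nonblock(int fd)
 *	{
 *		int flags = fcntl(fd, F_GETFL, 0);
 *		if (flags < 0)
 *			return -1;
 *		return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *	}
 *
 * The read-modify-write matters: F_SETFL replaces all of the
 * SETFL_MASK bits at once, so passing a bare O_NONBLOCK would clear
 * O_APPEND and friends.
 */
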
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	struct file *filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
		case F_GETLK64:
			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
			break;
		case F_SETLK64:
		case F_SETLKW64:
			err = fcntl_setlk64(fd, filp, cmd,
					(struct flock64 __user *) arg);
			break;
		default:
			err = do_fcntl(fd, cmd, arg, filp);
			break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

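/*
 * Check whether the file owner may signal task @p: permission is
 * granted when the owner's euid is root, or when the owner's uid or
 * euid matches the target's real or saved uid; in either case the
 * security module must also agree.
 */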
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((fown->euid == 0 ||
		fown->euid == cred->suid || fown->euid == cred->uid ||
		fown->uid  == cred->suid || fown->uid  == cred->uid) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum locklessly in parallel, so make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!do_send_sig_info(signum, &si, p, group))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}

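/*
 * Deliver SIGIO (or the signal chosen with F_SETSIG) for @fown to
 * every task named by the owner pid.  A PIDTYPE_MAX owner was set via
 * F_OWNER_TID, so it targets exactly one thread and group delivery is
 * turned off.
 */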
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

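/*
 * The fasync machinery below maintains the per-file lists used for
 * SIGIO notification.  Entries are added and removed with both
 * filp->f_lock and fasync_lock held (in that order), while readers in
 * kill_fasync() walk the list under RCU, taking only the per-entry
 * fa_lock.
 */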
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * matches the state "is the filp on a fasync list".
 */
static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if it only updated an existing entry.
 *
 * NOTE! It is very important that the FASYNC flag always
 * matches the state "is the filp on a fasync list".
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new, *fa, **fp;
	int result = 0;

	new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);

		kmem_cache_free(fasync_cache, new);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	result = 1;
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it made no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);

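/*
 * A typical driver wires this up roughly as follows (a sketch; the
 * names are illustrative, not from this file):
 *
 *	static struct fasync_struct *my_async_queue;
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &my_async_queue);
 *	}
 *
 * and then signals interested readers when data arrives:
 *
 *	kill_fasync(&my_async_queue, SIGIO, POLL_IN);
 */
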
/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two-bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
		O_RDONLY	| O_WRONLY	| O_RDWR	|
		O_CREAT		| O_EXCL	| O_NOCTTY	|
		O_TRUNC		| O_APPEND	| /* O_NONBLOCK	| */
		__O_SYNC	| O_DSYNC	| FASYNC	|
		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
		FMODE_EXEC
		));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)