/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/tpqic02.h>
#include <linux/ftape.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_I2C
extern int i2c_init_all(void);
#endif
#ifdef CONFIG_FB
extern void fbmem_init(void);
#endif
#ifdef CONFIG_PROM_CONSOLE
extern void prom_con_init(void);
#endif
#ifdef CONFIG_MDA_CONSOLE
extern void mda_console_init(void);
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

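/*
 * Common helper for write_mem() and write_kmem(): copy user data from
 * 'buf' to the kernel-virtual destination 'p'.  'realp' is the raw device
 * offset (physical for /dev/mem, kernel virtual for /dev/kmem) and is used
 * only for the page-0 check on sparc/m68k below.  Advances *ppos and
 * returns the number of bytes accounted for, or -EFAULT.
 */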
static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
			    const char * buf, size_t count, loff_t *ppos)
{
	ssize_t written;

	written = 0;
#if defined(__sparc__) || defined(__mc68000__)
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE-realp;
		if (sz > count) sz = count;
		/* Hmm. Do something? */
		buf+=sz;
		p+=sz;
		count-=sz;
		written+=sz;
	}
#endif
	if (copy_from_user(p, buf, count))
		return -EFAULT;
	written += count;
	*ppos += written;
	return written;
}


/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	unsigned long end_mem;
	ssize_t read;

	end_mem = __pa(high_memory);
	if (p >= end_mem)
		return 0;
	if (count > end_mem - p)
		count = end_mem - p;
	read = 0;
#if defined(__sparc__) || defined(__mc68000__)
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE-p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif
	if (copy_to_user(buf, __va(p), count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

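/*
 * Write to *physical* memory through /dev/mem.  As with read_mem(), the
 * file position is interpreted as a physical address; writes beyond
 * high_memory are truncated.
 */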
static ssize_t write_mem(struct file * file, const char * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (p >= end_mem)
		return 0;
	if (count > end_mem - p)
		count = end_mem - p;
	return do_write_mem(file, __va(p), p, buf, count, ppos);
}

#ifndef pgprot_noncached

/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
	/* On PPro and successors, PCD alone doesn't always mean
	   uncached because of interactions with the MTRRs. PCD | PWT
	   means definitely uncached. */
	if (boot_cpu_data.x86 > 3)
		prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
	if (MMU_IS_SUN3)
		prot |= SUN3_PAGE_NOCACHE;
	else
#endif
	if (MMU_IS_851 || MMU_IS_030)
		prot |= _PAGE_NOCACHE030;
	/* Use no-cache mode, serialized */
	else if (MMU_IS_040 || MMU_IS_060)
		prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

	return __pgprot(prot);
}

#endif /* !pgprot_noncached */

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#else
	return addr >= __pa(high_memory);
#endif
}

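/*
 * mmap() for /dev/mem: map the requested range of physical memory straight
 * into the caller's address space with remap_page_range(), marking it
 * non-cached and/or VM_IO when appropriate (see the checks below).
 */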
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Accesses above the top of memory that the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, are done
	 * non-cached.
	 */
	if (noncached_address(offset) || (file->f_flags & O_SYNC))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Don't try to swap out physical pages.. */
	vma->vm_flags |= VM_RESERVED;

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
		vma->vm_flags |= VM_IO;

	if (remap_page_range(vma->vm_start, offset, vma->vm_end-vma->vm_start,
			     vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read = 0;
	ssize_t virtr = 0;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		read = count;
		if (count > (unsigned long) high_memory - p)
			read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && read > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > read) tmp = read;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read -= tmp;
			count -= tmp;
		}
#endif
		if (copy_to_user(buf, (char *)p, read))
			return -EFAULT;
		p += read;
		buf += read;
		count -= read;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return virtr + read;
}

extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		wrote = do_write_mem(file, (void*)p, p, buf, wrote, ppos);

		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len && copy_from_user(kbuf, buf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
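/*
 * /dev/port: byte-wise access to x86-style I/O port space.  The file
 * position is the port number; reads stop at port 0xffff.
 */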
static ssize_t read_port(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char *tmp = buf;

	if (verify_area(VERIFY_WRITE,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

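/* Write side of /dev/port: outb() one byte per port, starting at *ppos. */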
static ssize_t write_port(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char * tmp = buf;

	if (verify_area(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

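/* Reads from /dev/null always return EOF. */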
static ssize_t read_null(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

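/* Writes to /dev/null are discarded but reported as successful. */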
static ssize_t write_null(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & VM_SHARED)
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(mm, addr, count);
		zeromap_page_range(addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		if (current->need_resched)
			schedule();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

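/*
 * Read from /dev/zero.  Small reads are satisfied with clear_user(); for
 * reads of four pages or more the page-aligned middle is handled by
 * read_zero_pagealigned(), which remaps private mappings to zero pages
 * rather than copying.
 */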
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

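/*
 * mmap() of /dev/zero: shared mappings become shmem-backed (anonymous
 * shared memory); private mappings are simply filled with zero pages.
 */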
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

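/* Writes to /dev/full always fail with ENOSPC. */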
static ssize_t write_full(struct file * file, const char * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	switch (orig) {
		case 0:
			file->f_pos = offset;
			return file->f_pos;
		case 1:
			file->f_pos += offset;
			return file->f_pos;
		default:
			return -EINVAL;
	}
}

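/*
 * Opening /dev/port (and, via the aliases below, /dev/mem and /dev/kmem)
 * requires CAP_SYS_RAWIO.
 */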
static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

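/*
 * Several of the minor devices share an implementation; the aliases below
 * simply reuse the functions defined above.
 */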
#define mmap_kmem	mmap_mem
#define zero_lseek	null_lseek
#define full_lseek      null_lseek
#define write_zero	write_null
#define read_full       read_zero
#define open_mem	open_port
#define open_kmem	open_mem

static struct file_operations mem_fops = {
	llseek:		memory_lseek,
	read:		read_mem,
	write:		write_mem,
	mmap:		mmap_mem,
	open:		open_mem,
};

static struct file_operations kmem_fops = {
	llseek:		memory_lseek,
	read:		read_kmem,
	write:		write_kmem,
	mmap:		mmap_kmem,
	open:		open_kmem,
};

static struct file_operations null_fops = {
	llseek:		null_lseek,
	read:		read_null,
	write:		write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
	llseek:		memory_lseek,
	read:		read_port,
	write:		write_port,
	open:		open_port,
};
#endif

static struct file_operations zero_fops = {
	llseek:		zero_lseek,
	read:		read_zero,
	write:		write_zero,
	mmap:		mmap_zero,
};

static struct file_operations full_fops = {
	llseek:		full_lseek,
	read:		read_full,
	write:		write_full,
};

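/*
 * open() for the mem character major: pick the file_operations that match
 * the minor number, then chain to that device's own open routine (if any).
 */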
static int memory_open(struct inode * inode, struct file * filp)
{
	switch (MINOR(inode->i_rdev)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

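/*
 * Register the fixed set of memory-class minors with devfs.  The table
 * below mirrors the minor numbers handled by memory_open().
 */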
void __init memory_devfs_register (void)
{
    /*  These are never unregistered  */
    static const struct {
	unsigned short minor;
	char *name;
	umode_t mode;
	struct file_operations *fops;
    } list[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__) || \
    defined(CONFIG_BCM94702_CPCI)
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
    };
    int i;

    for (i=0; i<(sizeof(list)/sizeof(*list)); i++)
	devfs_register (NULL, list[i].name, DEVFS_FL_NONE,
			MEM_MAJOR, list[i].minor,
			list[i].mode | S_IFCHR,
			list[i].fops, NULL);
}

static struct file_operations memory_fops = {
	open:		memory_open,	/* just a selector for the real open */
};

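/*
 * Character device initialisation: claim MEM_MAJOR, register the devfs
 * entries, seed the random driver, and initialise the other character
 * drivers that are configured in.
 */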
int __init chr_dev_init(void)
{
	if (devfs_register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
	memory_devfs_register();
	rand_initialize();
#ifdef CONFIG_I2C
	i2c_init_all();
#endif
#if defined(CONFIG_FB)
	fbmem_init();
#endif
#if defined(CONFIG_PROM_CONSOLE)
	prom_con_init();
#endif
#if defined(CONFIG_MDA_CONSOLE)
	mda_console_init();
#endif
	tty_init();
#ifdef CONFIG_M68K_PRINTER
	lp_m68k_init();
#endif
	misc_init();
#if CONFIG_QIC02_TAPE
	qic02_tape_init();
#endif
#ifdef CONFIG_FTAPE
	ftape_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
	tapechar_init();
#endif
	return 0;
}

__initcall(chr_dev_init);