/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
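
/*
 * For reference, ->get_xip_mem() in address_space_operations has the
 * signature
 *
 *	int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
 *			   int create, void **kmem, unsigned long *pfn);
 *
 * A minimal sketch of a filesystem implementation, assuming hypothetical
 * helpers example_get_block() and example_direct_access() that map a page
 * offset to a device block and expose that block's kernel address and pfn
 * (ext2 with CONFIG_EXT2_FS_XIP follows roughly this shape):
 *
 *	static int example_get_xip_mem(struct address_space *mapping,
 *				       pgoff_t pgoff, int create,
 *				       void **kmem, unsigned long *pfn)
 *	{
 *		sector_t block;
 *		int rc;
 *
 *		rc = example_get_block(mapping->host, pgoff, create, &block);
 *		if (rc)
 *			return rc;
 *		return example_direct_access(mapping->host, block, kmem, pfn);
 *	}
 *
 * -ENODATA from the block lookup denotes a sparse hole; with create != 0
 * the filesystem is expected to allocate the backing block.
 */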

/*
 * This is the file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note that the struct file * and the readahead state are not used at
 * all; filp may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;
		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse block, read as zeroes */
				zero = 1;
			} else
				goto out;
		}

		/*
		 * If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space.
		 * If the user copy fails we bail out with -EFAULT; anything
		 * copied on earlier iterations is returned as a short read.
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write,
 * after a hole at pgoff has been backed by a freshly allocated block.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);

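	/*
	 * A fault may have raced with us and installed the sparse page
	 * into a page table while we scanned; if the seqcount changed,
	 * redo the walk once while holding xip_sparse_mutex, which
	 * serializes against the writer side in xip_file_fault().
	 */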
	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		/* also reached via goto when the block already exists */
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			/* another task allocated the block meanwhile */
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block to back it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
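
/*
 * For illustration only: a filesystem that supports execute in place
 * typically wires the exported helpers above into its file_operations.
 * The sketch below is hypothetical (the names are made up); ext2's XIP
 * support follows roughly this pattern:
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.open		= generic_file_open,
 *	};
 */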

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_mem
 * to get at the memory instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
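
/*
 * For illustration only: a filesystem calls xip_truncate_page() from its
 * truncate path to zero the tail of the last block. The hypothetical
 * example_truncate() below is modelled on ext2's use:
 *
 *	static void example_truncate(struct inode *inode)
 *	{
 *		if (mapping_is_xip(inode->i_mapping))
 *			xip_truncate_page(inode->i_mapping, inode->i_size);
 *	}
 */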