/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

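/*
 * Example (not part of this file): a minimal userspace sketch of how the
 * fault/fsync paths above are exercised.  Writes through the mapping fault
 * pages in via fb_deferred_io_fault() and fb_deferred_io_mkwrite(); the
 * fsync() forces the deferred work to run right away via
 * fb_deferred_io_fsync().  "/dev/fb0" and the 320x240 16bpp geometry are
 * illustrative assumptions, not taken from this file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int example_fill_screen(void)
{
	size_t len = 320 * 240 * 2;	/* assumed 16bpp, 320x240 */
	int fd = open("/dev/fb0", O_RDWR);
	uint16_t *fb;

	if (fd < 0)
		return -1;

	fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		close(fd);
		return -1;
	}

	memset(fb, 0xff, len);	/* dirties pages -> mkwrite -> delayed work */
	fsync(fd);		/* flush to the device immediately */

	munmap(fb, len);
	close(fd);
	return 0;
}
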
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original ps's pte is marked
		writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_RESERVED | VM_DONTEXPAND);
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

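/*
 * Example (not part of this file): a minimal sketch of the deferred_io
 * callback a driver supplies in struct fb_deferred_io.  It is invoked by
 * fb_deferred_io_work() above with the sorted list of pages that
 * fb_deferred_io_mkwrite() collected.  "example_push_to_device" is a
 * hypothetical helper standing in for the driver's real transfer routine.
 */
static void example_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *page;
	unsigned long offset;

	/* pages arrive sorted by index; each covers PAGE_SIZE of dirty data */
	list_for_each_entry(page, pagelist, lru) {
		offset = page->index << PAGE_SHIFT;

		/* hypothetical helper: push this chunk of the shadow
		 * framebuffer out to the device (SPI, USB, I2C, ...) */
		example_push_to_device(info, offset, PAGE_SIZE);
	}
}
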
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");
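
/*
 * Example (not part of this file): a minimal sketch of how a driver wires
 * up deferred IO around the init/cleanup helpers above, assuming
 * example_deferred_io() from the earlier sketch and an fb_info whose
 * screen_base points at a vmalloc()'d shadow buffer.  Names prefixed
 * "example_" are illustrative only.
 */
static struct fb_deferred_io example_defio = {
	.delay		= HZ / 4,	/* flush touched pages 4x per second */
	.deferred_io	= example_deferred_io,
};

static int example_register(struct fb_info *info)
{
	/* the shadow framebuffer is vmalloc()'d, so tell
	 * fb_deferred_io_mmap() above not to set VM_IO on the mapping */
	info->flags |= FBINFO_VIRTFB;
	info->fbdefio = &example_defio;

	/* installs fb_deferred_io_mmap, the mutex, the page list and the
	 * delayed work used throughout this file */
	fb_deferred_io_init(info);

	return register_framebuffer(info);
}

static void example_unregister(struct fb_info *info)
{
	unregister_framebuffer(info);

	/* must run before the backing memory is freed: it clears the
	 * page->mapping pointers set up by the fault handler */
	fb_deferred_io_cleanup(info);
}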