/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

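/*
 * Allocate (if needed) the intermediate page-table levels for 'addr' and
 * return the slot that will hold the huge mapping.  Only PMD-sized huge
 * pages are supported, so the slot is really a pmd_t cast to pte_t *.
 */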
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

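/*
 * Walk the existing page tables for 'addr' and return the PMD-level slot
 * of the huge mapping, or NULL if an intermediate level is not present.
 * Unlike huge_pte_alloc(), nothing is allocated here.
 */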
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

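/*
 * The HUGETLB_TEST variants below are test-only code inherited from the
 * i386 implementation and are normally compiled out; the declarations of
 * 'vpfn' and 'pte' below are reconstructed, since the original left them
 * undefined.
 */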
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	/* Virtual page frame number; selects the subpage of the huge page. */
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#else

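/*
 * Huge page lookups are not resolved by virtual address on TILE; returning
 * -EINVAL here tells follow_page() to fall back to the normal page-table
 * walk, which handles huge mappings via pmd_huge()/follow_huge_pmd() below.
 */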
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

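/*
 * A PMD or PUD entry maps a huge page when the _PAGE_HUGE_PAGE bit is set
 * in the entry.
 */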
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

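/*
 * Given a huge PMD (or PUD) entry, return the struct page of the base page
 * within the huge mapping that contains 'address', by offsetting from the
 * head page.
 */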
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

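/*
 * Hugetlb PMD sharing is not implemented on TILE, so there is never
 * anything to unshare here.
 */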
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
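/*
 * Bottom-up search: scan upward from the cached free-area hint (or from
 * TASK_UNMAPPED_BASE) for a gap of at least 'len' bytes, aligned to the
 * huge page size.
 */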
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}

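/*
 * Top-down search: scan downward from mmap_base for a suitably aligned gap,
 * retrying from the top once and finally falling back to the bottom-up
 * allocator if nothing fits.
 */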
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma_prev(mm, addr, &prev_vma);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
			    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			mm->free_area_cache = addr;
			return addr;
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);

	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

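/*
 * Arch hook for hugetlb mmap(): validate the request, honor MAP_FIXED and
 * any explicit address hint, then dispatch to the bottom-up or top-down
 * search according to the mm's mmap layout.
 */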
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

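/*
 * Parse the "hugepagesz=" kernel command-line option and register the
 * corresponding hstate; only the PMD and PUD huge page sizes are accepted.
 */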
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */