/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

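/*
 * Base of the mmap area for the top-down layout: start just below
 * STACK_TOP and leave a gap equal to the stack rlimit, clamped
 * between MIN_GAP and MAX_GAP and rounded down to a page boundary.
 */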
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - (gap & PAGE_MASK);
}

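/*
 * Decide whether this process gets the legacy (bottom-up) mmap layout:
 * always for native 64 bit programs, and otherwise when the
 * legacy_va_layout sysctl is set, the ADDR_COMPAT_LAYOUT personality
 * bit is set, or the stack rlimit is unlimited.
 */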
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
	/*
	 * Force standard allocation for 64 bit programs.
	 */
	if (!is_compat_task())
		return 1;
#endif
	return sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

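/*
 * Before a large mapping is set up, make sure the address space is big
 * enough: for native 64 bit tasks that ask for at least TASK_SIZE
 * bytes, upgrade the page table to 4 levels (an address space of
 * 1UL << 53 bytes) if that has not happened yet.
 */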
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

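/*
 * Bottom-up search for a free area.  If the standard allocator runs
 * out of space and the 4 level page table is not yet in use, upgrade
 * the address space to 1UL << 53 bytes and retry once.
 */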
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

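/*
 * Same as above for the top-down (flexible) layout: search downwards
 * from the mmap base and upgrade to a 4 level page table on -ENOMEM
 * before retrying.
 */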
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif
