/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);

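/* Drop one reference to the shared L1 stack region and return the SRAM to
   the allocator once the last task using it has gone away. */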
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0)
		l1sram_free(l1_stack_base);
}

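/* Reserve the largest available chunk of L1 scratchpad SRAM on first use
   and report its base and length; returns 0 if that region cannot hold
   'length' bytes. */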
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}
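
/*
 * Illustrative sketch only, not part of this header: one way a hypothetical
 * caller might pair alloc_l1stack() and activate_l1stack() when giving a
 * task an L1 stack.  The helper name and its 'sp_base'/'len' parameters are
 * invented for illustration; the real call sites live elsewhere in the
 * Blackfin tree.
 */
#if 0	/* example only */
static inline unsigned long
example_give_task_l1_stack(struct mm_struct *mm, unsigned long sp_base,
			   unsigned long len)
{
	unsigned long l1_base;

	/* Reserve (or join) the shared L1 stack region. */
	if (!alloc_l1stack(len, &l1_base))
		return 0;

	/* Record the task's SDRAM save area and copy its stack image into
	   the L1 region at l1_base (== l1_stack_base). */
	activate_l1stack(mm, sp_base);

	return l1_base;
}
#endif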

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)

static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
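	/* If the outgoing mm's page protection masks are the ones currently
	   loaded into this CPU's CPLBs, flush the switched CPLB entries and
	   install the next mm's masks (CONFIG_MPU only). */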
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	local_irq_save_hw_cond(flags)
#define unlock_mm_switch(flags)	local_irq_restore_hw_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}

static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
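
/*
 * Illustrative sketch only, not part of this header: page_rwx_mask is laid
 * out as three consecutive bitmaps of page_mask_nelts words each -- read,
 * write, then execute -- with one bit per 4 KiB page.  A hypothetical query
 * helper would index it the same way protect_page() does:
 */
#if 0	/* example only */
static inline int example_page_may_write(struct mm_struct *mm, unsigned long addr)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page = addr >> 12;	/* ignores the async-bank remap above */
	unsigned long idx = page >> 5;
	unsigned long bit = 1 << (page & 31);

	/* Skip past the read bitmap to reach the write bitmap. */
	return (mask[page_mask_nelts + idx] & bit) != 0;
}
#endif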

/* Push the (possibly updated) protection bitmaps out to the data CPLBs if
   this mm is the one currently active on this CPU. */
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
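
/*
 * Illustrative sketch only, not part of this header: protect_page() merely
 * edits the in-memory bitmaps, so a hypothetical caller changing a whole
 * range would typically walk it page by page and then call
 * update_protections() once to refresh the CPLBs.  The helper name and
 * parameters below are invented for illustration.
 */
#if 0	/* example only */
static inline void example_protect_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end,
					 unsigned long vm_flags)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		protect_page(mm, addr, vm_flags);

	update_protections(mm);
}
#endif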
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
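	/* Allocate and zero the three R/W/X page protection bitmaps
	   (page_mask_nelts words each) for the new address space. */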
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

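	/* Free any per-process SRAM allocations still linked on the mm's
	   sram_list. */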
	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

#define ipipe_mm_switch_protect(flags)		\
	local_irq_save_hw_cond(flags)

#define ipipe_mm_switch_unprotect(flags)	\
	local_irq_restore_hw_cond(flags)

#endif