#ifndef __PPC64_MMU_CONTEXT_H
#define __PPC64_MMU_CONTEXT_H

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/ppcdebug.h>

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define NO_CONTEXT		0
#define FIRST_USER_CONTEXT	0x10    /* First 16 reserved for kernel */
#define LAST_USER_CONTEXT	0x8000  /* Same as PID_MAX for now... */
#define NUM_USER_CONTEXT	(LAST_USER_CONTEXT-FIRST_USER_CONTEXT)

/* Choose whether we want to implement our context
 * number allocator as a LIFO or FIFO queue.
 */
#define MMU_CONTEXT_LIFO
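
/*
 * With MMU_CONTEXT_LIFO defined, destroy_context() below puts a freed
 * context number back just below the queue head, so it is the next one
 * handed out again; in the FIFO variant it is appended at the tail and
 * is not reused until every other free number has been allocated once.
 */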

struct mmu_context_queue_t {
	spinlock_t lock;
	long head;
	long tail;
	long size;
	mm_context_t elements[LAST_USER_CONTEXT];
};

extern struct mmu_context_queue_t mmu_context_queue;
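
/*
 * mmu_context_queue (defined in the arch mm setup code) is a circular
 * buffer of free context numbers: head indexes the next number to hand
 * out, tail the slot where a freed number is appended in the FIFO case,
 * and size counts how many free numbers remain (at most
 * NUM_USER_CONTEXT).
 */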

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
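
/*
 * Nothing to do for a lazy TLB switch here: when a kernel thread
 * temporarily borrows a user mm, no per-CPU context state needs
 * updating on ppc64.
 */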

extern void flush_stab(void);

/*
 * The context number queue has underflowed.
 * Meaning: we tried to push a freed context number back onto the
 * context queue, but the queue was already full.
 */
static inline void
mmu_context_underflow(void)
{
	printk(KERN_DEBUG "mmu_context_underflow\n");
	panic("mmu_context_underflow");
}


/*
 * Set up the context for a new address space.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	long head, size;

	spin_lock( &mmu_context_queue.lock );

	if ( (size = mmu_context_queue.size) <= 0 ) {
		spin_unlock( &mmu_context_queue.lock );
		return -ENOMEM;
	}

	head = mmu_context_queue.head;
	mm->context = mmu_context_queue.elements[head];

	head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
	mmu_context_queue.head = head;
	mmu_context_queue.size = size-1;

	spin_unlock( &mmu_context_queue.lock );

	return 0;
}
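
/*
 * init_new_context() is called from the architecture independent mm
 * setup code (mm_init() in kernel/fork.c); a -ENOMEM return propagates
 * back and fails the fork or exec that needed the new address space.
 */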

/*
 * We're finished using the context for an address space.
 */
static inline void
destroy_context(struct mm_struct *mm)
{
	long index, size;

	spin_lock( &mmu_context_queue.lock );

	if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) {
		spin_unlock( &mmu_context_queue.lock );
		mmu_context_underflow();
	}

#ifdef MMU_CONTEXT_LIFO
	index = mmu_context_queue.head;
	index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
	mmu_context_queue.head = index;
#else
	index = mmu_context_queue.tail;
	index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
	mmu_context_queue.tail = index;
#endif

	mmu_context_queue.size = size+1;
	mmu_context_queue.elements[index] = mm->context;

	spin_unlock( &mmu_context_queue.lock );
}
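
/*
 * The size >= NUM_USER_CONTEXT check above catches the case where more
 * context numbers are freed than were ever handed out (for example a
 * double destroy_context() of the same mm), which would leave a
 * duplicate number in the queue and let two address spaces share a
 * context.
 */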


/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk, int cpu)
{
	tsk->thread.pgdir = next->pgd;	/* cache the pgdir in the thread;
					   maybe not needed any more */
	flush_stab();
}
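
/*
 * flush_stab() invalidates the user entries in the CPU's segment table,
 * so subsequent accesses fault their segments back in with VSIDs
 * derived from the new mm's context (see get_vsid() below).
 */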

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	flush_stab();
}
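
/*
 * activate_mm() is reached from the exec path (exec_mmap()) after
 * current->mm has been switched; it does the same segment flush as
 * switch_mm() but always operates on current.
 */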


#define VSID_RANDOMIZER 42470972311
#define VSID_MASK	0xfffffffff
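
/*
 * A VSID is formed by mixing the segment index (ea >> 28) with a
 * context number, multiplying by the odd constant VSID_RANDOMIZER to
 * scatter neighbouring segments across the VSID space, and keeping the
 * low 36 bits (VSID_MASK).  The scattering is what spreads the
 * resulting translations over different hash page table groups.
 */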


/* This is only valid for kernel (including vmalloc, imalloc and bolted) EAs.
 */
static inline unsigned long
get_kernel_vsid( unsigned long ea )
{
	unsigned long ordinal, vsid;

	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | (ea >> 60);
	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

	ifppcdebug(PPCDBG_HTABSTRESS) {
		/* For debug, this path creates a very poor vsid distribution.
		 * A user program can access virtual addresses in the form
		 * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
		 * to hash to the same page table group.
		 */
		ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
		vsid = ordinal & VSID_MASK;
	}

	return vsid;
}
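
/*
 * For kernel addresses the region bits (ea >> 60) stand in for the
 * context number, so each kernel region (linear mapping, vmalloc,
 * imalloc, bolted) gets its own set of VSIDs.
 */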

/* This is only valid for user EAs (user EAs do not exceed 2^41 (EADDR_SIZE)).
 */
static inline unsigned long
get_vsid( unsigned long context, unsigned long ea )
{
	unsigned long ordinal, vsid;

	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

	ifppcdebug(PPCDBG_HTABSTRESS) {
		/* See comment above. */
		ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
		vsid = ordinal & VSID_MASK;
	}

	return vsid;
}
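
/*
 * For user addresses the mm's context number is mixed in instead, so
 * the same effective address in two different address spaces yields
 * two different VSIDs and therefore independent translations in the
 * hash table.
 */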

#endif /* __PPC64_MMU_CONTEXT_H */