/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */

/*
 * Returns current instruction pointer ("program counter").
 */
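/*
 * "br %0,.+4" branches to the immediately following instruction and, as
 * a side effect, deposits that instruction's address in %0, which is as
 * close to the current PC as we can get without a dedicated "read PC"
 * instruction.
 */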
#define current_text_addr() \
  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
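/* That is: 1GB for tasks running with a 32-bit personality, and
   TASK_SIZE/2 (2TB) for everyone else. */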

/*
 * Bus types
 */
#define EISA_bus 1
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

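/*
 * Segment descriptor used by get_fs()/set_fs(): it records which
 * address-space limit (user or kernel) the uaccess routines enforce.
 */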
typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {
	/* the fields below are used by PALcode and must match struct pcb: */
	unsigned long ksp;
	unsigned long usp;
	unsigned long ptbr;
	unsigned int pcc;
	unsigned int asn;
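	/* Per-thread value maintained for user space by the rduniq/wruniq
	   PAL calls; it lives in the PCB and follows the thread across
	   context switches. */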
	unsigned long unique;
	/*
	 * bit  0: floating point enable
	 * bit 62: performance monitor enable
	 */
	unsigned long pal_flags;
	unsigned long res1, res2;

	/*
	 * The fields below are Linux-specific:
	 *
	 * bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
	 * bit 6..8: UAC bits (see sysinfo.h)
	 * bit 17..21: IEEE_STATUS_MASK bits (see fpu.h)
	 * bit 63: die_if_kernel recursion lock
	 */
	unsigned long flags;

	/* Perform syscall argument validation (get/set_fs). */
	mm_segment_t fs;

	/* Breakpoint handling for ptrace.  */
	unsigned long bpt_addr[2];
	unsigned int bpt_insn[2];
	int bpt_nsaved;
};

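/*
 * Everything in the initial thread starts out zero except fs, which is
 * set to KERNEL_DS; the trailing breakpoint fields are implicitly
 * zero-filled.
 */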
#define INIT_THREAD  { \
	0, 0, 0, \
	0, 0, 0, \
	0, 0, 0, \
	0, \
	KERNEL_DS \
}

#define THREAD_SIZE (2*PAGE_SIZE)

#include <asm/ptrace.h>

/*
 * Return saved PC of a blocked thread.  This assumes the frame
 * pointer is the 6th saved long on the kernel stack and that the
 * saved return address is the first long in the frame.  This all
 * holds provided the thread blocked through a call to schedule() ($15
 * is the frame pointer in schedule() and $15 is saved at offset 48 by
 * entry.S:do_switch_stack).
 *
 * Under heavy swap load I've seen this lose in an ugly way.  So do
 * some extra sanity checking on the ranges we expect these pointers
 * to be in so that we can fail gracefully.  This is just for ps after
 * all.  -- r~
 */
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
	unsigned long fp, sp = t->ksp, base = (unsigned long)t;

	if (sp > base && sp+6*8 < base + 16*1024) {
		fp = ((unsigned long*)sp)[6];
		if (fp > sp && fp < base + 16*1024)
			return *(unsigned long *)fp;
	}

	return 0;
}

/* Do necessary setup to start up a newly executed thread.  */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Create a kernel thread without removing it from tasklists.  */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

unsigned long get_wchan(struct task_struct *p);

/* See arch/alpha/kernel/ptrace.c for details.  */
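/*
 * pt_regs is saved at the very top of the two-page kernel stack, with
 * switch_stack immediately below it.  These macros yield the byte
 * offset of a saved register from the bottom of that stack (i.e. from
 * the task_struct); the "(long)&((type *)0)->reg" term is the usual
 * offsetof() idiom.
 */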
#define PT_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)		\
			 + (long)&((struct pt_regs *)0)->reg)

#define SW_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)		\
			 - sizeof(struct switch_stack)			\
			 + (long)&((struct switch_stack *)0)->reg)

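/* User-mode PC of a task, fetched from the pt_regs frame at the top of
   its kernel stack. */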
#define KSTK_EIP(tsk) \
    (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))

#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
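/* The current task's user SP lives in a PALcode-internal register and
   must be read with rdusp(); any other task had it saved into
   thread.usp when it was switched out. */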

/* NOTE: The task struct and the stack go together!  */
#define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p)     free_pages((unsigned long)(p),1)
#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
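/* "Together" means one order-1 (two page, 16KB) allocation: the
   task_struct sits at the bottom and the kernel stack grows down from
   the top, hence THREAD_SIZE == 2*PAGE_SIZE above.  get_task_struct()
   pins the pair by bumping the reference count on the underlying page. */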

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

#define cpu_relax()	do { } while (0)

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

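/*
 * A load whose destination is the zero register ($31) has no
 * architectural effect and cannot fault; Alpha implementations treat
 * it as a cache prefetch hint, which is all these helpers need.
 */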
extern inline void prefetch(const void *ptr)
{
	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
}

extern inline void prefetchw(const void *ptr)
{
	/* Quadword form of the hint for the write-side prefetches,
	   distinguishing them from the plain read prefetch above. */
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
}

extern inline void spin_lock_prefetch(const void *ptr)
{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
}

#endif /* __ASM_ALPHA_PROCESSOR_H */