/*
 * include/asm-m68knommu/processor.h
 *
 * Copyright (C) 1995 Hamish Macdonald
 */

#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
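
/*
 * Illustrative sketch (not part of the original header): the macro above
 * evaluates to the address of a local label placed at the expansion point,
 * i.e. roughly the current program counter. A hypothetical diagnostic use
 * might look like:
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", pc);
 */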

#include <linux/compiler.h>
#include <linux/threads.h>
#include <asm/types.h>
#include <asm/segment.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
#include <asm/current.h>

static inline unsigned long rdusp(void)
{
#ifdef CONFIG_COLDFIRE
	extern unsigned int sw_usp;
	return sw_usp;
#else
	unsigned long usp;
	__asm__ __volatile__("move %/usp,%0" : "=a" (usp));
	return usp;
#endif
}

static inline void wrusp(unsigned long usp)
{
#ifdef CONFIG_COLDFIRE
	extern unsigned int sw_usp;
	sw_usp = usp;
#else
	__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
#endif
}
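
/*
 * Illustrative sketch: rdusp()/wrusp() give the kernel access to the user
 * stack pointer. On ColdFire it lives in the software shadow variable
 * sw_usp; on conventional 68k it is read and written through the %usp
 * register. A hypothetical caller saving and restoring the USP could do:
 *
 *	unsigned long usp = rdusp();
 *	...
 *	wrusp(usp);
 */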

/*
 * User space process size: 3.75GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	(0xF0000000UL)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's. We won't be using it.
 */
#define TASK_UNMAPPED_BASE	0

/*
 * If you change this structure, you must change the code and offsets
 * in m68k/machasm.S.
 */

struct thread_struct {
	unsigned long  ksp;		/* kernel stack pointer */
	unsigned long  usp;		/* user stack pointer */
	unsigned short sr;		/* saved status register */
	unsigned short fs;		/* saved fs (sfc, dfc) */
	unsigned long  crp[2];		/* cpu root pointer */
	unsigned long  esp0;		/* points to SR of stack frame */
	unsigned long  fp[8*3];		/* floating point regs */
	unsigned long  fpcntl[3];	/* fp control regs */
	unsigned char  fpstate[FPSTATESIZE];  /* floating point state */
};

#define INIT_THREAD  { \
	sizeof(init_stack) + (unsigned long) init_stack, 0, \
	PS_S, __KERNEL_DS, \
	{0, 0}, 0, {0,}, {0, 0, 0}, {0,}, \
}
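
/*
 * Illustrative sketch: the positional initializer above fills the fields of
 * struct thread_struct in declaration order (ksp, usp, sr, fs, crp, esp0,
 * fp, fpcntl, fpstate). An equivalent form using C99 designated
 * initializers, with the omitted members implicitly zeroed, would be:
 *
 *	#define INIT_THREAD {						   \
 *		.ksp = sizeof(init_stack) + (unsigned long) init_stack,   \
 *		.sr  = PS_S,						   \
 *		.fs  = __KERNEL_DS,					   \
 *	}
 */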

/*
 * ColdFire stacks need to be re-aligned on trap exit; conventional
 * 68k can handle this case cleanly.
 */
#if defined(CONFIG_COLDFIRE)
#define	reformat(_regs)		do { (_regs)->format = 0x4; } while(0)
#else
#define	reformat(_regs)		do { } while (0)
#endif

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
#define start_thread(_regs, _pc, _usp)			\
do {							\
	set_fs(USER_DS); /* reads from user space */	\
	(_regs)->pc = (_pc);				\
	((struct switch_stack *)(_regs))[-1].a6 = 0;	\
	reformat(_regs);				\
	if (current->mm)				\
		(_regs)->d5 = current->mm->start_data;	\
	(_regs)->sr &= ~0x2000; /* clear supervisor bit */ \
	wrusp(_usp);					\
} while(0)
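
/*
 * Illustrative sketch: start_thread() is called by the binfmt loaders once
 * a new executable image has been set up; it points the saved pt_regs at
 * the new entry point, clears the supervisor bit so the task returns to
 * user mode, and installs the new user stack pointer. A hypothetical call
 * site (names here are examples only) could look like:
 *
 *	start_thread(regs, entry_point, stack_top);
 */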

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Free current thread data structures etc.
 */
static inline void exit_thread(void)
{
}

unsigned long thread_saved_pc(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);

#define	KSTK_EIP(tsk)	\
    ({			\
	unsigned long eip = 0;	 \
	if ((tsk)->thread.esp0 > PAGE_SIZE && \
	    (virt_addr_valid((tsk)->thread.esp0))) \
	      eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
	eip; })
#define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
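
/*
 * Illustrative sketch: KSTK_EIP()/KSTK_ESP() report a task's user-mode
 * program counter and stack pointer, e.g. for /proc/<pid>/stat. A
 * hypothetical use (names are examples only) could be:
 *
 *	unsigned long pc = KSTK_EIP(task);
 *	unsigned long sp = KSTK_ESP(task);
 */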

#define cpu_relax()    barrier()

#endif /* __ASM_M68K_PROCESSOR_H */