#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines.  Note that this is included both from .c and .S
 * files, so outside the __ASSEMBLY__ guard it contains only defines,
 * no C code.  This is based on information published in the
 * Processor Abstraction Layer and the System Abstraction Layer
 * manuals.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#include <linux/config.h>

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>

#define KERNEL_START		(PAGE_OFFSET + 68*1024*1024)

#define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)
#define PERCPU_ADDR		(0xa000000000000000 + 2*PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} *ia64_boot_param;

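/*
 * ";;" is an IA-64 stop bit: it forces the assembler to end the
 * current instruction group here.  The "memory" clobber also keeps
 * the compiler from moving memory accesses across this point.
 */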
static inline void
ia64_insn_group_barrier (void)
{
	__asm__ __volatile__ (";;" ::: "memory");
}

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
#endif

#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)
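
/*
 * Illustrative sketch only (not part of this header, variable names
 * are hypothetical): the classic producer/consumer pairing of the
 * macros above.
 *
 *	producer:		consumer:
 *		data = val;		while (!flag)
 *		smp_wmb();			barrier();
 *		flag = 1;		smp_rmb();
 *					use(data);
 *
 * smp_wmb() orders the store to "data" before the store to "flag";
 * smp_rmb() orders the load of "flag" before the loads from "data".
 */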

#define safe_halt()         ia64_pal_halt(1)                /* PAL_HALT */

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
/* For spinlocks etc */

#ifdef CONFIG_IA64_DEBUG_IRQ

  extern unsigned long last_cli_ip;

# define local_irq_save(x)								\
do {											\
	unsigned long ip, psr;								\
											\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory");	\
	if (psr & (1UL << 14)) {							\
		__asm__ ("mov %0=ip" : "=r"(ip));					\
		last_cli_ip = ip;							\
	}										\
	(x) = psr;									\
} while (0)

# define local_irq_disable()								\
do {											\
	unsigned long ip, psr;								\
											\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory");	\
	if (psr & (1UL << 14)) {							\
		__asm__ ("mov %0=ip" : "=r"(ip));					\
		last_cli_ip = ip;							\
	}										\
} while (0)

# define local_irq_restore(x)							\
do {										\
	unsigned long ip, old_psr, psr = (x);					\
										\
	__asm__ __volatile__ ("mov %0=psr;"					\
			      "cmp.ne p6,p7=%1,r0;;"				\
			      "(p6) ssm psr.i;"					\
			      "(p7) rsm psr.i;;"				\
			      "srlz.d"						\
			      : "=&r" (old_psr) : "r"((psr) & IA64_PSR_I)	\
			      : "p6", "p7", "memory");				\
	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) {			\
		__asm__ ("mov %0=ip" : "=r"(ip));				\
		last_cli_ip = ip;						\
	}									\
} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */
  /* clearing of psr.i is implicitly serialized (visible by next insn) */
# define local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;"	\
						      : "=r" (x) :: "memory")
# define local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
/* (potentially) setting psr.i requires data serialization: */
# define local_irq_restore(x)	__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"		\
						      "(p6) ssm psr.i;"			\
						      "(p7) rsm psr.i;;"		\
						      "srlz.d"				\
						      :: "r"((x) & IA64_PSR_I)		\
						      : "p6", "p7", "memory")
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
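
/*
 * Usage sketch (illustrative only): the flags argument must be a
 * local unsigned long, and save/restore pairs must nest properly.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(interrupts off, old psr in flags)
 *	... critical section ...
 *	local_irq_restore(flags);	(psr.i restored to its saved state)
 */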

#define __cli()			local_irq_disable ()
#define __save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define __save_and_cli(flags)	local_irq_save(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define __sti()			local_irq_enable ()
#define __restore_flags(flags)	local_irq_restore(flags)

#ifdef CONFIG_SMP
  extern void __global_cli (void);
  extern void __global_sti (void);
  extern unsigned long __global_save_flags (void);
  extern void __global_restore_flags (unsigned long);
# define cli()			__global_cli()
# define sti()			__global_sti()
# define save_flags(flags)	((flags) = __global_save_flags())
# define restore_flags(flags)	__global_restore_flags(flags)
#else /* !CONFIG_SMP */
# define cli()			__cli()
# define sti()			__sti()
# define save_flags(flags)	__save_flags(flags)
# define restore_flags(flags)	__restore_flags(flags)
#endif /* !CONFIG_SMP */

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

#define IA64_FETCHADD(tmp,v,n,sz)						\
({										\
	switch (sz) {								\
	      case 4:								\
		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
										\
	      case 8:								\
		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
	}									\
})

#define ia64_fetch_and_add(i,v)							\
({										\
	__u64 _tmp;								\
	volatile __typeof__(*(v)) *_v = (v);					\
	switch (i) {								\
	      case -16:	IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break;	\
	      case  -8:	IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break;	\
	      case  -4:	IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break;	\
	      case  -1:	IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break;	\
	      case   1:	IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break;	\
	      case   4:	IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break;	\
	      case   8:	IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break;	\
	      case  16:	IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break;	\
	      default:								\
		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
		break;								\
	}									\
	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
})
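
/*
 * Usage sketch (illustrative only, hypothetical counter): the
 * increment must be one of the constants handled above, and the
 * macro returns the *new* value (fetchadd itself yields the old
 * value; the increment is added back in).
 *
 *	static volatile __s32 count;
 *	...
 *	newval = ia64_fetch_and_add(4, &count);
 */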

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer (void);

static __inline__ unsigned long
__xchg (unsigned long x, volatile void *ptr, int size)
{
	unsigned long result;

	switch (size) {
	      case 1:
		__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
				    : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 2:
		__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
				    : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 4:
		__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
				    : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 8:
		__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
				    : "r" (ptr), "r" (x) : "memory");
		return result;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
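
/*
 * Usage sketch (illustrative only, hypothetical variables): xchg()
 * atomically stores the new value and returns the previous contents.
 * The underlying xchgN instructions always have acquire semantics.
 *
 *	old_state = xchg(&state, NEW_STATE);
 */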

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long __cmpxchg_called_with_bad_pointer(void);

#define ia64_cmpxchg(sem,ptr,old,new,size)						\
({											\
	__typeof__(ptr) _p_ = (ptr);							\
	__typeof__(new) _n_ = (new);							\
	__u64 _o_, _r_;									\
											\
	switch (size) {									\
	      case 1: _o_ = (__u8 ) (long) (old); break;				\
	      case 2: _o_ = (__u16) (long) (old); break;				\
	      case 4: _o_ = (__u32) (long) (old); break;				\
	      case 8: _o_ = (__u64) (long) (old); break;				\
	      default: break;								\
	}										\
	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));				\
	switch (size) {									\
	      case 1:									\
		__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"		\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
		break;									\
											\
	      case 2:									\
		__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv"		\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
		break;									\
											\
	      case 4:									\
		__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv"		\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
		break;									\
											\
	      case 8:									\
		__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv"		\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
		break;									\
											\
	      default:									\
		_r_ = __cmpxchg_called_with_bad_pointer();				\
		break;									\
	}										\
	(__typeof__(old)) _r_;								\
})

#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
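
/*
 * Usage sketch (illustrative only, hypothetical 32-bit word and flag):
 * the usual lock-free update loop reads the old value, computes the
 * new one, and retries until cmpxchg sees the value it started from.
 *
 *	__u32 old, new;
 *
 *	do {
 *		old = *word;
 *		new = old | FLAG;
 *	} while (cmpxchg_acq(word, old, new) != old);
 */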

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)							\
  do {										\
	if (_cmpxchg_bugcheck_count-- <= 0) {					\
		void *ip;							\
		extern int printk(const char *fmt, ...);			\
		asm ("mov %0=ip" : "=r"(ip));					\
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
		break;								\
	}									\
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
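
/*
 * The BUGCHECK macros are intended to be dropped into cmpxchg retry
 * loops like the one sketched above (same hypothetical variables):
 *
 *	CMPXCHG_BUGCHECK_DECL
 *
 *	do {
 *		CMPXCHG_BUGCHECK(word);
 *		old = *word;
 *		new = old | FLAG;
 *	} while (cmpxchg_acq(word, old, new) != old);
 *
 * With CONFIG_IA64_DEBUG_CMPXCHG enabled, a loop that retries more
 * than 128 times on the same word prints a diagnostic rather than
 * spinning silently.
 */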

#ifdef __KERNEL__

#define prepare_to_switch()    do { } while(0)

#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)		0
#endif

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#if defined(CONFIG_SMP) && defined(CONFIG_PERFMON)
# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif

#define __switch_to(prev,next,last) do {						\
	if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
	    || IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE())	\
		ia64_save_extra(prev);							\
	if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))	\
	    || IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE())	\
		ia64_load_extra(next);							\
	(last) = ia64_switch_to((next));						\
} while (0)

#ifdef CONFIG_SMP

/* Return true if this CPU can call the console drivers in printk() */
#define arch_consoles_callable() (cpu_online_map & (1UL << smp_processor_id()))

/*
 * In the SMP case, we save the fph state when context-switching
 * away from a thread that modified fph.  This way, when the thread
 * gets scheduled on another CPU, the CPU can pick up the state from
 * task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.
 */
# define switch_to(prev,next,last) do {					\
	if (ia64_psr(ia64_task_regs(prev))->mfh) {			\
		ia64_psr(ia64_task_regs(prev))->mfh = 0;		\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;		\
		__ia64_save_fpu((prev)->thread.fph);			\
		(prev)->thread.last_fph_cpu = smp_processor_id();	\
	}								\
	if ((next)->thread.flags & IA64_THREAD_FPH_VALID) {		\
		if (((next)->thread.last_fph_cpu == smp_processor_id()) \
		    && (ia64_get_fpu_owner() == next)) {		\
			ia64_psr(ia64_task_regs(next))->dfh = 0;	\
			ia64_psr(ia64_task_regs(next))->mfh = 0;	\
		} else {						\
			ia64_psr(ia64_task_regs(next))->dfh = 1;	\
		}							\
	}								\
	__switch_to(prev,next,last);					\
  } while (0)
#else
# define switch_to(prev,next,last) do {					\
	ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next));	\
	__switch_to(prev,next,last);					\
} while (0)
#endif

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */