/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev,next,last) do {					     \
	if (prev == next)						     \
		break;							     \
	save_fp_regs(&prev->thread.fp_regs);				     \
	restore_fp_regs(&next->thread.fp_regs);				     \
	save_access_regs(&prev->thread.acrs[0]);			     \
	restore_access_regs(&next->thread.acrs[0]);			     \
	prev = __switch_to(prev,next);					     \
} while (0)
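
/*
 * Illustrative usage sketch (not part of the original header): the generic
 * scheduler is the intended caller of switch_to().  The hypothetical helper
 * below only shows the shape of such a call; the real caller is
 * context_switch() in kernel/sched.c, with locking and mm handling omitted.
 */
#if 0	/* usage sketch, not built */
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	/*
	 * switch_to() saves prev's FP and access registers, loads next's,
	 * and transfers control through __switch_to(); when this task is
	 * scheduled back in, "prev" names the task we came from.
	 */
	switch_to(prev, next, prev);
}
#endif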

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()	do { } while (0)
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

#define finish_arch_switch(prev) do {					     \
	set_fs(current->thread.mm_segment);				     \
	account_vtime(prev, current);					     \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)							  \
({									  \
	__typeof__(*(ptr)) __ret;					  \
	__ret = (__typeof__(*(ptr)))					  \
		__xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
	__ret;								  \
})

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,%3\n"
			"0:	cs	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) ptr)
			: "d" (x), "Q" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,%3\n"
			"0:	csg	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "Q" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
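
/*
 * Illustrative notes and usage sketch (not part of the original header).
 *
 * The 1- and 2-byte cases above have no byte-sized compare-and-swap to rely
 * on, so they operate on the aligned 4-byte word containing the target:
 * "addr ^= addr & 3" rounds the address down to a word boundary, and
 * "shift = (3 ^ (addr & 3)) << 3" is how far the byte value must be shifted
 * left to land in its big-endian position within that word.  For example, a
 * byte at offset 1 in its word gives shift = (3 ^ 1) << 3 = 16, so the new
 * value goes into bits 16..23, ~(255 << 16) preserves the other three bytes,
 * and the CS loop retries until the whole word is updated consistently.
 *
 * A hedged usage sketch of the xchg() wrapper follows; the names below are
 * hypothetical.
 */
#if 0	/* usage sketch, not built */
static unsigned int example_pending;

static inline unsigned int example_take_pending(void)
{
	/* atomically fetch the old value and reset the word to 0 */
	return xchg(&example_pending, 0);
}
#endif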

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
					(unsigned long)(n), sizeof(*(ptr))))

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(255 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(65535 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(int *) ptr)
			: "0" (old), "d" (new), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(long *) ptr)
			: "0" (old), "d" (new), "Q" (*(long *) ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
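
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * classic compare-and-swap retry loop built on cmpxchg().  The counter and
 * helper names are hypothetical.
 */
#if 0	/* usage sketch, not built */
static unsigned int example_counter;

static inline unsigned int example_add_bounded(unsigned int limit)
{
	unsigned int old, new;

	do {
		old = example_counter;
		if (old >= limit)
			return old;	/* already at the limit, give up */
		new = old + 1;
		/* cmpxchg() returns the previous value; retry if it moved */
	} while (cmpxchg(&example_counter, old, new) != old);
	return new;
}
#endif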

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)
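
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * usual publish/consume pairing of wmb()/rmb(), which on s390 both expand
 * to the "bcr 15,0" serialization above.  All names below are hypothetical.
 */
#if 0	/* usage sketch, not built */
static int example_data;
static int example_ready;

static inline void example_publish(int value)
{
	example_data = value;
	wmb();			/* make the data visible before the flag */
	example_ready = 1;
}

static inline int example_consume(int *value)
{
	if (!example_ready)
		return 0;
	rmb();			/* read the flag before reading the data */
	*value = example_data;
	return 1;
}
#endif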

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})
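
/*
 * Usage sketch (illustrative only, not part of the original header):
 * inspecting and modifying a single control register with the helpers
 * above.  Control register 0 and bit 17 are pure placeholders here; which
 * bit controls which facility is machine-dependent.
 */
#if 0	/* usage sketch, not built */
static inline unsigned long example_read_cr0(void)
{
	unsigned long cr0;

	__ctl_store(cr0, 0, 0);		/* store CR0 into cr0 */
	return cr0;
}

static inline void example_enable_feature(void)
{
	__ctl_set_bit(0, 17);		/* set bit 17 of CR0 (placeholder) */
}
#endif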

#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef __s390x__
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
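
/*
 * Usage sketch (illustrative only, not part of the original header):
 * cmpxchg_local() suffices when the value is only ever touched by the
 * current CPU, e.g. a per-cpu statistic also updated from interrupt
 * context.  The names below are hypothetical.
 */
#if 0	/* usage sketch, not built */
static inline void example_local_max(unsigned long *slot, unsigned long val)
{
	unsigned long old;

	do {
		old = *slot;
		if (old >= val)
			return;
		/* only needs to be atomic wrt this CPU, not other CPUs */
	} while (cmpxchg_local(slot, old, val) != old);
}
#endif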

/*
 * Used to set the PSW mask, except for the first byte, which is
 * left unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
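
/*
 * Usage sketch (illustrative only, not part of the original header):
 * bracketing a short critical section so that machine-check interruptions
 * are not taken while it runs.  The function name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static inline void example_do_fragile_work(void)
{
	local_mcck_disable();
	/* ... work that must not be interrupted by a machine check ... */
	local_mcck_enable();
}
#endif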

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

static inline unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}

static inline int __stfle(unsigned long long *list, int doublewords)
{
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}

static inline int stfle(unsigned long long *list, int doublewords)
{
	if (!(stfl() & (1UL << 24)))
		return -EOPNOTSUPP;
	return __stfle(list, doublewords);
}
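
/*
 * Usage sketch (illustrative only, not part of the original header):
 * querying the facility list with stfle() and testing one facility bit.
 * The helper name is hypothetical; real callers use the facility numbers
 * defined in the Principles of Operation.
 */
#if 0	/* usage sketch, not built */
static inline int example_has_facility(int nr)
{
	unsigned long long facilities[4] = { };

	if (stfle(facilities, 4) < 0)
		return 0;	/* stfle not available on this machine */
	if (nr >= 4 * 64)
		return 0;	/* outside the bits we asked for */
	/* facility bits are numbered from the leftmost bit of the list */
	return (facilities[nr / 64] >> (63 - (nr % 64))) & 1;
}
#endif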

static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
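
/*
 * Usage sketch (illustrative only, not part of the original header):
 * tprot() returns the TPROT condition code (0..3), or -EFAULT if the
 * instruction faulted, so it can be used to probe whether an address can
 * be examined at all.  The helper name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static inline int example_storage_present(unsigned long addr)
{
	/*
	 * An addressing exception (caught via the EX_TABLE entry above)
	 * leaves rc at -EFAULT; any condition code 0..3 means the address
	 * could at least be tested.
	 */
	return tprot(addr) != -EFAULT;
}
#endif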

#endif /* __KERNEL__ */

#endif